import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
SCREAMING_SNAKE_CASE : Tuple = """\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"""
class A_ ( unittest.TestCase , _lowerCamelCase ):
def _UpperCAmelCase ( self : Any ):
__a = load_tool("text-question-answering" )
self.tool.setup()
__a = load_tool("text-question-answering" , remote=_A )
def _UpperCAmelCase ( self : int ):
__a = self.tool(_A , "What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def _UpperCAmelCase ( self : Tuple ):
__a = self.remote_tool(_A , "What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def _UpperCAmelCase ( self : Tuple ):
__a = self.tool(text=_A , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def _UpperCAmelCase ( self : List[str] ):
__a = self.remote_tool(text=_A , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
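# A minimal usage sketch (an addition, not part of the original test file); running it
# downloads whatever checkpoint the "text-question-answering" task maps to in the
# installed transformers version.
if __name__ == "__main__":
    qa_tool = load_tool("text-question-answering")
    print(qa_tool(text=TEXT, question="Who founded Hugging Face?"))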
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size for num_mel_bins so the feature extractor pads spectrograms.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
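# A minimal usage sketch (an addition, not part of the original module). It assumes the
# "microsoft/speecht5_tts" checkpoint, which ships the matching tokenizer and feature
# extractor, so `from_pretrained` can assemble this processor.
if __name__ == "__main__":
    import numpy as np

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    waveform = np.zeros(16000, dtype=np.float32)  # 1 second of "silence" at 16 kHz
    # Text inputs plus a spectrogram target, as used for TTS fine-tuning.
    batch = processor(text="hello world", audio_target=waveform, sampling_rate=16000)
    print(list(batch.keys()))  # e.g. input_ids, attention_mask, labels, decoder_attention_mask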
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
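# A minimal CPU inference sketch (an addition, not part of the original tests); it reuses
# the same community ONNX export exercised by the tests above.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
    )
    low_res = floats_tensor((1, 3, 128, 128), rng=random.Random(0))
    result = pipe(prompt="a photo of a castle", image=low_res, num_inference_steps=3, output_type="numpy")
    print(result.images.shape)  # (1, 512, 512, 3): 4x upscaling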
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
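# Illustration (an addition, not part of the original file): with `_LazyModule` installed
# in `sys.modules`, each symbol listed in `_import_structure` only triggers its submodule
# import on first attribute access, e.g.
#
#     from transformers.models.wav2vec2 import Wav2Vec2Config   # loads configuration_wav2vec2 only
#     from transformers.models.wav2vec2 import Wav2Vec2ForCTC   # loads modeling_wav2vec2 (requires torch)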
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
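# The sharding rule the shard test above exercises, as a standalone sketch (inferred from
# the expected partition lists, not quoted from the library): worker `i` of `n` takes
# every n-th partition starting at `i`.
def _expected_partitions_for_worker(worker_id, num_workers, num_partitions):
    return list(range(worker_id, num_partitions, num_workers))


assert _expected_partitions_for_worker(0, 2, 4) == [0, 2]
assert _expected_partitions_for_worker(1, 2, 4) == [1, 3]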
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
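# A minimal end-to-end sketch (an addition, not part of the original tests): with
# apply_ocr=True the image processor extracts words and boxes itself (this requires the
# pytesseract package), so inputs need not be built by hand as in the test above.
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
    model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
    encoding = processor(prepare_img(), return_tensors="pt")  # words/boxes come from OCR
    outputs = model(**encoding)
    print(outputs.last_hidden_state.shape)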
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb=None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
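# A minimal usage sketch (an addition, not part of the original module), using a synthetic
# PIL image so it runs offline.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    image = Image.fromarray(np.zeros((240, 320, 3), dtype=np.uint8))
    image_processor = BlipImageProcessor()  # defaults: resize to 384x384, rescale, CLIP-normalize
    batch = image_processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)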
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
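
def _demo_pre_process_datasets():
    # Illustrative helper (an addition, not part of the original script): shows the tensor
    # shapes produced for a single toy example with two continuations and label 0.
    dataset = [([10, 11, 12], [20, 21], [30, 31], 0)]
    input_ids, mc_token_ids, lm_labels, mc_labels = pre_process_datasets([dataset], 10, 5, 1, 2, 3)[0]
    assert input_ids.shape == (1, 2, 10)  # (n_batch, n_alternatives, input_len)
    assert mc_token_ids.tolist() == [[7, 7]]  # index of the _classify_ token in each sequence
    assert mc_labels.tolist() == [0]
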
if __name__ == "__main__":
main()
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
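# Usage sketch for the tool above (hedged: the "document-question-answering"
# task string and the sample file name are assumptions; the Donut checkpoint
# downloads on first use):
#
#     from transformers import load_tool
#     from PIL import Image
#
#     tool = load_tool("document-question-answering")
#     answer = tool(document=Image.open("invoice.png"), question="What is the total?")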
| 606 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
'''simple docstring'''
def __init__(self : Any ):
__a : str = []
def lowerCAmelCase (self : Optional[int] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : int , **snake_case_ : Optional[Any] ):
self.events.append('''on_init_end''' )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any , **snake_case_ : List[str] ):
self.events.append('''on_train_begin''' )
def lowerCAmelCase (self : Dict , snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int] , **snake_case_ : int ):
self.events.append('''on_train_end''' )
def lowerCAmelCase (self : str , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : List[str] , **snake_case_ : Tuple ):
self.events.append('''on_epoch_begin''' )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , **snake_case_ : Tuple ):
self.events.append('''on_epoch_end''' )
def lowerCAmelCase (self : Tuple , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Dict , **snake_case_ : List[Any] ):
self.events.append('''on_step_begin''' )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : List[str] , **snake_case_ : Optional[Any] ):
self.events.append('''on_step_end''' )
def lowerCAmelCase (self : List[Any] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : List[Any] , **snake_case_ : Optional[Any] ):
self.events.append('''on_evaluate''' )
def lowerCAmelCase (self : List[Any] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Dict , **snake_case_ : Dict ):
self.events.append('''on_predict''' )
def lowerCAmelCase (self : Dict , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : int , **snake_case_ : Tuple ):
self.events.append('''on_save''' )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : int , **snake_case_ : int ):
self.events.append('''on_log''' )
def lowerCAmelCase (self : List[str] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Optional[int] , **snake_case_ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase (self : str ):
__a : Union[str, Any] = tempfile.mkdtemp()
def lowerCAmelCase (self : Dict ):
shutil.rmtree(self.output_dir )
def lowerCAmelCase (self : str , snake_case_ : Any=0 , snake_case_ : Tuple=0 , snake_case_ : Optional[Any]=6_4 , snake_case_ : Optional[Any]=6_4 , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : str ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
__a : List[Any] = RegressionDataset(length=snake_case_ )
__a : List[Any] = RegressionDataset(length=snake_case_ )
__a : Optional[Any] = RegressionModelConfig(a=snake_case_ , b=snake_case_ )
__a : Optional[int] = RegressionPreTrainedModel(snake_case_ )
__a : Optional[Any] = TrainingArguments(self.output_dir , disable_tqdm=snake_case_ , report_to=[] , **snake_case_ )
return Trainer(
snake_case_ , snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , callbacks=snake_case_ , )
def lowerCAmelCase (self : str , snake_case_ : Dict , snake_case_ : Optional[int] ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
# Order doesn't matter
__a : List[Any] = sorted(snake_case_ , key=lambda snake_case_ : cb.__name__ if isinstance(snake_case_ , snake_case_ ) else cb.__class__.__name__ )
__a : Dict = sorted(snake_case_ , key=lambda snake_case_ : cb.__name__ if isinstance(snake_case_ , snake_case_ ) else cb.__class__.__name__ )
for cba, cba in zip(snake_case_ , snake_case_ ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ) and not isinstance(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , cba.__class__ )
elif not isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
self.assertEqual(cba.__class__ , snake_case_ )
else:
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : Tuple ):
__a : List[Any] = ['''on_init_end''', '''on_train_begin''']
__a : str = 0
__a : Dict = len(trainer.get_eval_dataloader() )
__a : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(snake_case_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowerCAmelCase (self : Optional[int] ):
__a : Any = self.get_trainer()
__a : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# Callbacks passed at init are added to the default callbacks
__a : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__a : List[Any] = self.get_trainer(disable_tqdm=snake_case_ )
__a : List[str] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCAmelCase (self : int ):
__a : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__a : Dict = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
__a : Optional[int] = self.get_trainer()
__a : Tuple = trainer.pop_callback(snake_case_ )
self.assertEqual(cb.__class__ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# We can also add, pop, or remove by instance
__a : Union[str, Any] = self.get_trainer()
__a : int = trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
__a : Tuple = self.get_trainer()
__a : int = trainer.callback_handler.callbacks[0]
__a : Optional[Any] = trainer.pop_callback(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCAmelCase (self : Tuple ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=snake_case_ )
__a : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__a : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# Independent log/save/eval
__a : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__a : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
__a : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__a : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
__a : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__a : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
__a : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__a : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# A bit of everything
__a : Tuple = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__a : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__a : Union[str, Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(snake_case_ ) in warn_mock.call_args[0][0]
| 718 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowercase__ ='src/transformers'
lowercase__ ='docs/source/en'
lowercase__ ='.'
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ):
with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__a : Any = f.readlines()
# Find the start prompt.
__a : List[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase__ ):
start_index += 1
start_index += 1
__a : Any = start_index
while not lines[end_index].startswith(lowerCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowercase__ ='Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
lowercase__ =re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowercase__ =re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowercase__ =re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ =direct_transformers_import(TRANSFORMERS_PATH)
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] ):
__a : Any = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase__ )
return [m.group(0 ) for m in matches]
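# Illustrative behavior of the splitter above: applied to "VisionEncoderDecoder"
# it yields ["Vision", "Encoder", "Decoder"].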
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ):
__a : Optional[int] = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase__ )
__a : List[Any] = (width - text_length) // 2
__a : Tuple = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __UpperCamelCase ( ):
__a : List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__a : Optional[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__a : Union[str, Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__a : Optional[int] = collections.defaultdict(lowerCAmelCase__ )
__a : List[Any] = collections.defaultdict(lowerCAmelCase__ )
__a : Dict = collections.defaultdict(lowerCAmelCase__ )
__a : Tuple = collections.defaultdict(lowerCAmelCase__ )
__a : Union[str, Any] = collections.defaultdict(lowerCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCAmelCase__ ):
__a : Any = None
if attr_name.endswith('''Tokenizer''' ):
__a : Union[str, Any] = slow_tokenizers
__a : List[str] = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
__a : Union[str, Any] = fast_tokenizers
__a : List[Any] = attr_name[:-1_3]
elif _re_tf_models.match(lowerCAmelCase__ ) is not None:
__a : List[str] = tf_models
__a : Tuple = _re_tf_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase__ ) is not None:
__a : List[str] = flax_models
__a : str = _re_flax_models.match(lowerCAmelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase__ ) is not None:
__a : Union[str, Any] = pt_models
__a : int = _re_pt_models.match(lowerCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
__a : List[str] = True
break
# Try again after removing the last word in the name
__a : str = ''''''.join(camel_case_split(lowerCAmelCase__ )[:-1] )
# Let's build that table!
__a : Optional[int] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__a : Optional[int] = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__a : Any = [len(lowerCAmelCase__ ) + 2 for c in columns]
__a : Union[str, Any] = max([len(lowerCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
__a : List[str] = '''|''' + '''|'''.join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for c, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
__a : Union[str, Any] = {True: '''✅''', False: '''❌'''}
for name in model_names:
__a : str = model_name_to_prefix[name]
__a : str = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for l, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + "|\n"
return table
def __UpperCamelCase ( lowerCAmelCase__ : Optional[int]=False ):
__a , __a , __a , __a : Optional[int] = _find_text_in_file(
filename=os.path.join(lowerCAmelCase__ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
__a : Union[str, Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCAmelCase__ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase__ =parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 326 | 0 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
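# Quick sanity checks (illustrative; 29 = 6*5 - 1 and 31 = 6*5 + 1):
assert is_prime(29) and is_prime(31)
assert not is_prime(27) and not is_prime(1)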
def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
    print(f"{solution() = }")
| 336 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowercase : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = val
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
lowercase : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase : Dict = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowercase : Union[str, Any] = value
else:
lowercase : Tuple = value
return new_state_dict
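# e.g. "backbone.0.body.layer1.0.conv1.weight"
#  ->  "backbone.conv_encoder.model.layer1.0.conv1.weight"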
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> List[Any]:
lowercase : str = """"""
if is_panoptic:
lowercase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase : int = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
lowercase : Union[str, Any] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Dict = in_proj_weight[:256, :]
lowercase : Optional[int] = in_proj_bias[:256]
lowercase : Tuple = in_proj_weight[256:512, :]
lowercase : Any = in_proj_bias[256:512]
lowercase : Any = in_proj_weight[-256:, :]
lowercase : Dict = in_proj_bias[-256:]
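# Note: each fused in_proj matrix has shape (3 * 256, 256); the three 256-row
# slices taken above are the query, key, and value projections, in that order.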
def _snake_case( ) -> Tuple:
lowercase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase : Tuple = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
lowercase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase : Tuple = """resnet101"""
if "dc5" in model_name:
lowercase : List[Any] = True
lowercase : Optional[Any] = """panoptic""" in model_name
if is_panoptic:
lowercase : Optional[int] = 250
else:
lowercase : Tuple = 91
lowercase : Any = """huggingface/label-files"""
lowercase : int = """coco-detection-id2label.json"""
lowercase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowercase : Any = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : int = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
lowercase : int = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowercase : List[Any] = ConditionalDetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
# prepare image
lowercase : Dict = prepare_img()
lowercase : List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
lowercase : List[str] = encoding["""pixel_values"""]
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
lowercase : Union[str, Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase : str = """conditional_detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = rename_backbone_keys(SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase : Optional[int] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowercase : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowercase : Dict = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
lowercase : str = ConditionalDetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else ConditionalDetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
model.push_to_hub(repo_id=SCREAMING_SNAKE_CASE__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowercase : List[Any] = conditional_detr(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowercase : Any = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 336 | 1 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c, used
# in the Casimir force formula F = (ℏ c π² A) / (240 d⁴)
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def _lowercase ( force : float , area : float , distance : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
UpperCamelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
UpperCamelCase = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
UpperCamelCase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig ( PretrainedConfig ):
model_type = """markuplm"""
def __init__( self : Optional[Any] , __magic_name__ : List[Any]=3_0_5_2_2 , __magic_name__ : int=7_6_8 , __magic_name__ : List[Any]=1_2 , __magic_name__ : List[Any]=1_2 , __magic_name__ : str=3_0_7_2 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=5_1_2 , __magic_name__ : List[str]=2 , __magic_name__ : Dict=0.02 , __magic_name__ : List[str]=1e-12 , __magic_name__ : str=0 , __magic_name__ : List[str]=0 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Dict=2_5_6 , __magic_name__ : Tuple=1_0_2_4 , __magic_name__ : Any=2_1_6 , __magic_name__ : str=1_0_0_1 , __magic_name__ : Dict=3_2 , __magic_name__ : Optional[int]=5_0 , __magic_name__ : List[Any]="absolute" , __magic_name__ : Any=True , __magic_name__ : Optional[Any]=None , **__magic_name__ : List[str] , ):
"""simple docstring"""
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
# additional properties
UpperCamelCase = max_depth
UpperCamelCase = max_xpath_tag_unit_embeddings
UpperCamelCase = max_xpath_subs_unit_embeddings
UpperCamelCase = tag_pad_id
UpperCamelCase = subs_pad_id
UpperCamelCase = xpath_unit_hidden_size
| 181 | 1 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest-path length and the path itself between two cells
    of a binary grid (1 = walkable), optionally allowing diagonal moves."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
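# Note: every step costs exactly 1 here, so this heap-based Dijkstra behaves
# like breadth-first search; the priority queue only starts to matter once
# non-uniform step costs are introduced.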
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 284 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def SCREAMING_SNAKE_CASE ( grid : np.ndarray , source : tuple[int, int] , destination : tuple[int, int] , allow_diagonal : bool , ):
lowercase , lowercase = grid.shape
lowercase = [-1, 1, 0, 0]
lowercase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase = [(0, source)], set()
lowercase = np.full((rows, cols) , np.inf )
lowercase = 0
lowercase = np.empty((rows, cols) , dtype=lowercase_ )
lowercase = None
while queue:
((lowercase) , (lowercase)) = heappop(lowercase_ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase = predecessors[x, y]
path.append(lowercase_ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase_ ) ):
lowercase , lowercase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase_ , (dist + 1, (nx, ny)) )
lowercase = dist + 1
lowercase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
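# Usage sketch for the variant above (illustrative only: its local names are
# still mangled, so it will not run as-is):
#
#     grid = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
#     dist, path = SCREAMING_SNAKE_CASE(grid, (0, 0), (2, 2), False)
#     # expected: dist == 4 along an L-shaped path around the blocked centre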
| 588 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
snake_case_ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
snake_case_ = logging.getLogger()
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser()
parser.add_argument("-f" )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
return args.f
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str="eval" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , F"{split}_results.json" )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
return json.load(_SCREAMING_SNAKE_CASE )
raise ValueError(F"can\'t find {path}" )
snake_case_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE__ ( TestCasePlus ):
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : List[Any] = F"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE_ : List[str] = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : Any = F"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE_ : int = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : str = F"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_results(__UpperCamelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : Tuple = F"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE_ : Dict = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : Tuple = F"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : Any = F"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE_ : Tuple = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ : Optional[Any] = F"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_qa.main()
SCREAMING_SNAKE_CASE_ : Tuple = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 706 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
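# Usage sketch (hedged: assumes an active SparkSession; `read()` triggers
# dataset preparation and can be slow on first run):
#
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df).read()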
| 68 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__a )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
def extract(*__snake_case : Optional[int] , **__snake_case : Dict ):
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] ) -> Dict:
'''simple docstring'''
lowerCamelCase = torch.ones([0] )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(__a )
return self
return Out()
return extract
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowerCamelCase = 77
lowerCamelCase = self.dummy_image.to(__a )
lowerCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
lowerCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
lowerCamelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__a , )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__a , return_dict=__a , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowerCamelCase = 77
lowerCamelCase = self.dummy_image.to(__a )
# put models in fp16
lowerCamelCase = unet.half()
lowerCamelCase = vae.half()
lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
lowerCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
lowerCamelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=__a , num_inference_steps=2 , output_type='np' , image=__a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to a resolution that is divisible by 8 but not by 16 or 32
lowerCamelCase = init_image.resize((760, 504) )
lowerCamelCase = """BAAI/AltDiffusion"""
lowerCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase = """A fantasy landscape, trending on artstation"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='np' , )
lowerCamelCase = output.images[0]
lowerCamelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCamelCase = init_image.resize((768, 512) )
lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
lowerCamelCase = """BAAI/AltDiffusion"""
lowerCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase = """A fantasy landscape, trending on artstation"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='np' , )
lowerCamelCase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 246 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
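# Illustrative sketch (not part of the original test module): each operation
# tuple pairs an operator-module function with its arguments, so replaying the
# same sequence against HashMap and dict exercises identical call paths, e.g.:
#
#     fun, *args = _set("key_a", "val_a")  # -> (setitem, "key_a", "val_a")
#     fun({}, *args)                       # same as d["key_a"] = "val_a"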
def test_no_new_methods_exposed():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 149 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>",
        mask_token="<mask>", eos_token="<eos>", **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=True)
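# Minimal usage sketch (illustrative, not part of the original file; assumes a
# local "vocab.txt" with one token per line, e.g. "<cls>", "<pad>", "<eos>",
# "<unk>", "L", "A", "G", ...):
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     tokenizer.tokenize("M K T")                         # whitespace split -> ["M", "K", "T"]
#     tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_token_id, 5, 6, eos_token_id]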
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
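# Illustrative note (not part of the original file): with the _LazyModule bound
# into sys.modules, importing the package stays cheap and a heavy backend module
# is only imported on first attribute access, e.g.:
#
#     from transformers import ElectraModel  # resolves the torch import path lazily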
| 232 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
__snake_case :Union[str, Any] = StableDiffusionLDMaDPipeline
__snake_case :List[str] = TEXT_TO_IMAGE_PARAMS
__snake_case :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case :str = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowercase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowercase = CLIPTextModel(_lowerCAmelCase )
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Dict=0 ) -> List[Any]:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionLDMaDPipeline(**_lowerCAmelCase )
__lowercase = ldmad_pipe.to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = rgb[0, -3:, -3:, -1]
__lowercase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__lowercase = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
__lowercase = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionLDMaDPipeline(**_lowerCAmelCase )
__lowercase = ldmad_pipe.to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = rgb_slice_a[0, -3:, -3:, -1]
__lowercase = depth_slice_a[0, -3:, -1]
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = ldmad_pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
__lowercase = text_inputs["""input_ids"""].to(_lowerCAmelCase )
__lowercase = ldmad_pipe.text_encoder(_lowerCAmelCase )[0]
__lowercase = prompt_embeds
# forward
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = rgb_slice_a[0, -3:, -3:, -1]
__lowercase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def _a ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
__lowercase = StableDiffusionLDMaDPipeline(**_lowerCAmelCase )
__lowercase = ldmad_pipe.to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = """french fries"""
__lowercase = ldmad_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = rgb[0, -3:, -3:, -1]
__lowercase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__lowercase = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
__lowercase = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]="cpu" , _lowerCAmelCase : int=torch.floataa , _lowerCAmelCase : int=0 ) -> Optional[int]:
"""simple docstring"""
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = np.random.RandomState(_lowerCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowercase = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
__lowercase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
__lowercase = ldmad_pipe.to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_inputs(_lowerCAmelCase )
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = rgb[0, -3:, -3:, -1].flatten()
__lowercase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
__lowercase = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
__lowercase = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : int ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]="cpu" , _lowerCAmelCase : Dict=torch.floataa , _lowerCAmelCase : Optional[int]=0 ) -> Tuple:
"""simple docstring"""
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = np.random.RandomState(_lowerCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowercase = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
__lowercase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_inputs(_lowerCAmelCase )
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = 0.495_586
__lowercase = 0.33_795_515
__lowercase = 112.48_518
__lowercase = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_lowerCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_inputs(_lowerCAmelCase )
__lowercase = ldmad_pipe(**_lowerCAmelCase )
__lowercase , __lowercase = output.rgb, output.depth
__lowercase = 0.4_194_127
__lowercase = 0.35_375_586
__lowercase = 0.5_638_502
__lowercase = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 80 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
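# Quick usage sketch (illustrative, not part of the original module); note the
# coefficients are ordered from the constant term upwards:
#
#     p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
#     q = Polynomial(1, [0, 1])     # x
#     str(p + q)                    # "3x^2 + 3x + 1"
#     p.evaluate(2)                 # 3*4 + 2*2 + 1 = 17
#     str(p.derivative())           # "6x + 2"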
| 80 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge / crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None,
        rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
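# Worked example of the crop_pct branch in resize above (illustrative): with
# size = {"shortest_edge": 224} and the default crop_pct = 224 / 256, the image
# is first resized so its shortest edge becomes int(224 / (224 / 256)) = 256,
# then center-cropped to 224 x 224. For shortest_edge >= 384 the image is
# instead warped straight to (shortest_edge, shortest_edge) with no crop.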
| 532 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532 | 1 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    # write num - 1 as 2**t * s, with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    # 5 rounds of the Miller-Rabin witness test
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
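# Worked example of the decomposition in rabin_miller above (illustrative):
# for num = 97, num - 1 = 96 = 2**5 * 3, so s = 3 and t = 5. A witness `a`
# passes if pow(a, 3, 97) == 1 or some repeated squaring in the chain hits 96
# (i.e. num - 1); with 5 random witnesses, a composite survives every round
# with probability at most (1/4)**5.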
| 29 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18,
        min_resolution=30, max_resolution=400, do_resize=True, size=None,
        do_center_crop=True, crop_size=None, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
UpperCamelCase__: int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCamelCase )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase__: List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase__: Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase__: Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
UpperCamelCase__: str = []
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCamelCase__: Dict = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
UpperCamelCase__ , UpperCamelCase__: str = prepare_semantic_single_inputs()
UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
UpperCamelCase__ , UpperCamelCase__: List[str] = prepare_semantic_batch_inputs()
UpperCamelCase__: Optional[int] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Dict = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCamelCase__ , UpperCamelCase__: Any = prepare_semantic_single_inputs()
UpperCamelCase__: int = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
UpperCamelCase__: List[Any] = True
UpperCamelCase__: List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 380 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : int = StableDiffusionInpaintPipeline
_lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowercase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : int = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=a)
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Tuple:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB').resize((64, 64))
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline(**a)
SCREAMING_SNAKE_CASE = sd_pipe.to(a)
sd_pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = sd_pipe(**a).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(a , safety_checker=a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , image=a , mask_image=a , generator=a , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9E-3
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
a , torch_dtype=torch.floataa , safety_checker=a , )
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , image=a , mask_image=a , generator=a , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE = PNDMScheduler.from_pretrained(a , subfolder='scheduler')
SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
a , safety_checker=a , scheduler=a , torch_dtype=torch.floataa , )
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , image=a , mask_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 444 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
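# Worked example (illustrative): "SKY" -> (ord("S") - 64) + (ord("K") - 64)
# + (ord("Y") - 64) = 19 + 11 + 25 = 55, and 55 = 0.5 * 10 * 11 is the 10th
# triangular number, so "SKY" is counted by solution().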
| 444 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
while i <= 10_00_00:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
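# Note (illustrative, not part of the original file): each iteration adds the
# chord length hypot(x2 - x1, f(x2) - f(x1)), so the total is the piecewise-
# linear approximation of the arc length integral of sqrt(1 + f'(x)**2) dx;
# the demo above shows the sum settling as `steps` grows.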
| 101 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 644 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location='cpu' )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
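# Illustrative usage (hypothetical model and checkpoint path, not part of the
# original module):
#     model = FlaxBertModel(BertConfig())
#     flax_state = load_pytorch_checkpoint_in_flax_state_dict(model, "pytorch_model.bin", is_sharded=False)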
def rename_key_and_reshape_tensor( pt_tuple_key: Tuple[str] , pt_tensor: np.ndarray , random_flax_state_dict: Dict[str, jnp.ndarray] , model_prefix: str , ):
    def is_key_or_prefix_key_in_dict( key: Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
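# Example of the renaming rules above (illustrative): a PyTorch linear weight
#     ("dense", "weight") of shape (out_features, in_features)
# is returned as the Flax kernel
#     ("dense", "kernel") of shape (in_features, out_features), i.e. transposed,
# while a 4D conv weight is additionally permuted with transpose(2, 3, 1, 0).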
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ):
# convert pytorch tensor to numpy
__UpperCamelCase ={k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__UpperCamelCase =flax_model.params['params']
else:
__UpperCamelCase =flax_model.params
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={}
__UpperCamelCase =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__UpperCamelCase =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase =rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
__UpperCamelCase =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ):
import torch
# Load the index
__UpperCamelCase ={}
for shard_file in shard_filenames:
# load using msgpack utils
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase =flax_model.params['params']
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__UpperCamelCase =flax_model.params
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__UpperCamelCase =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase =rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
__UpperCamelCase =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
if "var" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def load_flax_checkpoint_in_pytorch_model( model , flax_checkpoint_path ):
__UpperCamelCase =os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE__ , 'rb' ) as state_f:
try:
__UpperCamelCase =from_bytes(SCREAMING_SNAKE_CASE__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
__UpperCamelCase =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__UpperCamelCase =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__UpperCamelCase =[]
__UpperCamelCase =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__UpperCamelCase =flax_key_tuple[0] == pt_model.base_model_prefix
__UpperCamelCase ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# conv layer
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase =jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# linear layer
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__UpperCamelCase =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__UpperCamelCase =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__UpperCamelCase ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__UpperCamelCase ='.'.join(SCREAMING_SNAKE_CASE__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__UpperCamelCase ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__UpperCamelCase =key.split('.' )
__UpperCamelCase =None
if key_components[-3::2] == ["parametrizations", "original0"]:
__UpperCamelCase =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__UpperCamelCase =key_components[-2] + '_v'
if name is not None:
__UpperCamelCase =key_components[:-3] + [name]
__UpperCamelCase ='.'.join(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =key
if flax_key in special_pt_names:
__UpperCamelCase =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__UpperCamelCase =np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
__UpperCamelCase =torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
__UpperCamelCase =list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""

    def __init__( self ):
        self.initialized = False

    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ):
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True

    def init_retrieval( self ):
        self.retriever.index.init_index()

    def retrieve( self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever( RagRetriever ):
    """simple docstring"""

    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ):
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ):
        logger.info('initializing retrieval' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve( self , question_hidden_states , n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
@classmethod
    def get_tokenizers( cls , retriever_name_or_path , indexed_dataset=None , **kwargs ):
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )

    @classmethod
    def from_pretrained( cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ):
        config = kwargs.pop('config' , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
| 682 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
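# Example invocation (illustrative; the script filename is hypothetical and the
# flags simply mirror the defaults defined above):
#     python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5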
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_evaluate(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        })
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
    main()
| 113 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
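# For this sample graph the articulation points printed below are 2, 3 and 5
# (worked out by hand for illustration: removing any one of them disconnects the graph).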
compute_ap(data)
| 113 | 1 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    """simple docstring"""
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """simple docstring"""
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
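# This is the classic 6-node example network (as in CLRS); the program prints 23.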
| 275 | """simple docstring"""
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase__ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
UpperCAmelCase__ = re.compile(r"""([a-z\d])([A-Z])""")
UpperCAmelCase__ = re.compile(r"""(?<!_)_(?!_)""")
UpperCAmelCase__ = re.compile(r"""(_{2,})""")
UpperCAmelCase__ = r"""^\w+(\.\w+)*$"""
UpperCAmelCase__ = r"""<>:/\|?*"""
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
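# e.g. camelcase_to_snakecase("SquadV2") -> "squad_v2" (illustrative)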
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != """" )
def filename_prefix_for_name(name):
    """simple docstring"""
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """simple docstring"""
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re, split):
        raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
    return f'''{filename_prefix_for_name(name)}-{split}'''
def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(path, prefix)
    return f'''{filepath}*'''
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """simple docstring"""
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
| 275 | 1 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
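# The loop above applies Pascal's rule C(n, r) = C(n-1, r-1) + C(n-1, r) in place,
# sweeping j from high to low so each row can reuse the previous one.
# Expected output here: 252, since C(10, 5) = 252.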
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 658 | 1 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class SCREAMING_SNAKE_CASE__ :
pass
| 705 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path, map_location="""cpu""" )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict, pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path, """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(config, indent=2 ) + """\n""" )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path, """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(vocab, indent=2 ) + """\n""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 529 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
    def _parse_labels( self , labels ):
        """simple docstring"""
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        """simple docstring"""
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        """simple docstring"""
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
    @property
    def entailment_id( self ):
        """simple docstring"""
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('''entail''' ):
                return ind
        return -1
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Dict=TruncationStrategy.ONLY_FIRST , **lowerCAmelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
_a = self.tokenizer.eos_token
try:
_a = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=a_ , )
except Exception as e:
if "too short" in str(a_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_a = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
if kwargs.get('''multi_class''' , a_ ) is not None:
_a = kwargs["multi_class"]
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
_a = {}
if "candidate_labels" in kwargs:
_a = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
_a = kwargs["hypothesis_template"]
_a = {}
if "multi_label" in kwargs:
_a = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : str , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] , ) -> str:
"""simple docstring"""
if len(a_ ) == 0:
pass
elif len(a_ ) == 1 and "candidate_labels" not in kwargs:
_a = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(a_ , **a_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict="This example is {}." ) -> Optional[Any]:
"""simple docstring"""
_a = self._args_parser(a_ , a_ , a_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(a_ , a_ ) ):
_a = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(a_ ) - 1,
**model_input,
}
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = inputs["candidate_label"]
_a = inputs["sequence"]
_a = {k: inputs[k] for k in self.tokenizer.model_input_names}
_a = self.model(**a_ )
_a = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=False ) -> Tuple:
"""simple docstring"""
_a = [outputs["candidate_label"] for outputs in model_outputs]
_a = [outputs["sequence"] for outputs in model_outputs]
_a = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
_a = logits.shape[0]
_a = len(a_ )
_a = N // n
_a = logits.reshape((num_sequences, n, -1) )
if multi_label or len(a_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_a = self.entailment_id
_a = -1 if entailment_id == 0 else 0
_a = reshaped_outputs[..., [contradiction_id, entailment_id]]
_a = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
_a = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_a = reshaped_outputs[..., self.entailment_id]
_a = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
_a = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
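# Illustrative usage of this pipeline via the standard high-level API:
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification")
#     classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])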
| 22 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class snake_case_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 237 | 0 |
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]):
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
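# Illustrative call (the module invokes doctest below, but the original doctest
# examples were stripped; this shows the expected behaviour):
#     radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     -> [2, 24, 45, 66, 75, 90, 170, 802]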
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
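# Illustrative call: abbr("daBcd", "ABC") -> True, because "daBcd" can be turned
# into "ABC" by capitalizing 'a' and 'c' and deleting the remaining lowercase letters.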
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def canine_tokenizer( self ):
        return CanineTokenizer.from_pretrained("google/canine-s" )

    def get_tokenizer( self , **kwargs ):
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration( self ):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
    def test_encoding_keys( self ):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertIn("token_type_ids" , batch )
@require_torch
def UpperCamelCase__ ( self : Optional[int] ):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt")
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCamelCase__ ( self : List[str] ):
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
def UpperCamelCase__ ( self : List[Any] ):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
def UpperCamelCase__ ( self : Tuple ):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
def UpperCamelCase__ ( self : List[Any] ):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE006
                new_token = AddedToken(chr(SPECIAL_TOKEN), lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
def UpperCamelCase__ ( self : Any ):
_a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])), )
                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])))
@require_tokenizers
def UpperCamelCase__ ( self : Optional[int] ):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
def UpperCamelCase__ ( self : str ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def UpperCamelCase__ ( self : Union[str, Any] ):
pass
def UpperCamelCase__ ( self : List[str] ):
pass
def UpperCamelCase__ ( self : List[Any] ):
pass
def UpperCamelCase__ ( self : Optional[int] ):
pass
def UpperCamelCase__ ( self : int ):
pass
def UpperCamelCase__ ( self : int ):
pass
def UpperCamelCase__ ( self : Union[str, Any] ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
pass
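# Illustration (added): CANINE tokenizes by Unicode code point, which the expected
# ids in the batch test above reflect (57344 == 0xE000 is [CLS], 57345 == 0xE001
# is [SEP]). A minimal sketch of that encoding idea, independent of the tests:
def _codepoint_roundtrip_demo():
    text = "Life is like a box of chocolates."
    ids = [ord(ch) for ch in text]
    assert "".join(chr(i) for i in ids) == text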
| 692 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
                f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.')
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.')
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
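# Illustration (added): a shape-preserving smoke test of the block above. Upstream
# this class is diffusers' BasicTransformerBlock, so the sketch imports that name
# rather than relying on the obfuscated local class name (an assumption).
def _transformer_block_demo():
    from diffusers.models.attention import BasicTransformerBlock
    block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
    hidden_states = torch.randn(2, 8, 64)  # (batch, seq_len, dim)
    assert block(hidden_states).shape == hidden_states.shape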
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))
    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate
    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)
    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
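# Illustration (added): x * sigmoid(1.702 * x) is the cheap sigmoid approximation
# of GELU (Hendrycks & Gimpel); a loose numeric check against the exact version.
def _approx_gelu_demo():
    x = torch.linspace(-3, 3, steps=61)
    assert torch.allclose(x * torch.sigmoid(1.702 * x), F.gelu(x), atol=3e-2)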
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)
    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)
    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 692 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Tuple = logging.get_logger(__name__)
A__: List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class A__ ( PretrainedConfig ):
    model_type = "camembert"
    def __init__(self, vocab_size=3_0_5_2_2, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( OnnxConfig ):
    @property
    def __UpperCAmelCase (self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
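# Illustration (added): how the ONNX config above is typically consulted. Both
# local classes are obfuscated to `A__` (the second shadows the first), so this
# sketch imports the upstream names instead -- an assumption about this module's origin.
def _camembert_onnx_demo():
    from transformers.models.camembert.configuration_camembert import (
        CamembertConfig,
        CamembertOnnxConfig,
    )
    config = CamembertConfig()  # RoBERTa-style defaults defined above
    onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
    # OrderedDict mapping "input_ids"/"attention_mask" to their dynamic axes
    print(onnx_config.inputs)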
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 506 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein):
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 25 |
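# Illustration appended to the atom14/atom37 row above (added): driving the mask
# construction with a dummy batch; aatype 0 indexes the first residue type.
def _atom14_masks_demo():
    import torch
    protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # 8 residues of type 0
    protein = make_atom14_masks(protein)
    assert protein["residx_atom14_to_atom37"].shape == (8, 14)
    assert protein["atom37_atom_exists"].shape == (8, 37)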
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__ ( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray, ):
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
@staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0, ):
        '''simple docstring'''
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
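# Illustration (added): one second of random mono audio through the extractor above
# (a sketch; requires torchaudio's kaldi compliance module at runtime). The class
# name is the obfuscated `lowerCAmelCase__` defined above.
def _fbank_extraction_demo():
    speech = np.random.randn(16000).astype(np.float32)
    extractor = lowerCAmelCase__()  # defaults: 80 mel bins, 16 kHz
    inputs = extractor(speech, sampling_rate=16000, return_tensors="np")
    print(inputs["input_features"].shape)  # (1, num_frames, 80)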
| 512 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class a :
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
def __call__( self : List[str] ) -> Tuple:
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("""path""") is not None and os.path.isfile(value["""path"""]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("""path""")}
        elif value.get("""bytes""") is not None or value.get("""path""") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("""bytes"""), "path": value.get("""path""")}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        '''simple docstring'''
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("""::""")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, """rb""", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        '''simple docstring'''
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["""bytes""", """path"""], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["""bytes""", """path"""], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("""bytes""") >= 0:
                bytes_array = storage.field("""bytes""")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("""path""") >= 0:
                path_array = storage.field("""path""")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["""bytes""", """path"""], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["""bytes"""] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["""bytes""", """path"""], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, """rb""") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""]) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("""path""").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["""bytes""", """path"""], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, """filename""") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''')
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''')
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
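# Illustration (added): an encode round-trip for the Image feature above (the
# feature dataclass is obfuscated to `a`). Requires Pillow; a numpy array is
# serialized to PNG bytes by the helpers above.
def _image_encode_demo():
    feature = a()
    encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
    assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)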
| 36 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36 | 1 |
"""simple docstring"""
def actual_power(a: int, b: int):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
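# Illustration (added): quick checks of the divide-and-conquer power above;
# negative exponents route through the reciprocal branch of `power`.
def _power_demo():
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125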
| 77 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs, [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ], )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2, )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf(self):
pass
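# Illustration (added): the call pattern the tests above exercise, outside unittest.
# A sketch that assumes network access to download the tiny test checkpoint.
def _vqa_demo():
    vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
    preds = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(preds)  # list of {"score": float, "answer": str}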
| 250 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
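# Illustration (added): the same conversion driven from Python rather than the CLI
# flags above; the checkpoint and output paths are placeholders, not real files.
def _convert_demo():
    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder path
        scheduler_type="pndm",
        extract_ema=True,
    )
    pipe.save_pretrained("converted-sd-pipeline", safe_serialization=True)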
| 637 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
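# Illustration (added): sanity checks for the squared-cosine beta schedule above
# (`betas_for_alpha_bar`, its upstream name, restored because __init__ calls it).
def _cosine_betas_demo():
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert bool((betas > 0).all()) and bool((betas <= 0.999).all())
    assert betas[0] < betas[-1]  # noise grows toward the end of diffusion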
class snake_case_( SchedulerMixin, ConfigMixin ):
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_0_0_0, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''')
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                ''' for the UnCLIPScheduler.''')
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    ''' for the UnCLIPScheduler.''')
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
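# Minimal usage sketch (illustrative only; `model` is a hypothetical stand-in
# for a real prior/decoder network and is not part of this file):
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 4, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample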
| 637 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
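        # Example of the offset at work (values from the alignment table above):
        # spm maps "," to id 3, so this tokenizer returns 3 + self.fairseq_offset
        # == 4, matching the original fairseq vocabulary position of ",".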
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 565 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
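# Illustrative effect of the lazy module above (assuming this file is an
# __init__.py like transformers/models/opt/__init__.py): names listed in
# _import_structure are resolvable immediately, while the heavy torch/TF/Flax
# modeling submodules are only imported on first attribute access.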
| 565 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None, )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__a, __a )}
elif self.config_name == "cb":
return acc_and_fa(__a, __a, fa_avg='''macro''' )
elif self.config_name == "record":
lowercase__ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
lowercase__ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(__a, __a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__a, __a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__a, __a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' ) | 707 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # 0-1 BFS: a deque replaces Dijkstra's priority queue because every
        # edge weight is either 0 or 1.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # Weight-0 edges go to the front of the deque, weight-1 to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
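# Illustrative usage of the 0-1 BFS above (the 5-vertex graph is made up):
#   g = AdjacencyList(5)
#   g.add_edge(0, 1, 0); g.add_edge(1, 2, 1); g.add_edge(2, 4, 0)
#   g.add_edge(0, 3, 1); g.add_edge(3, 4, 1)
#   g.get_shortest_path(0, 4)  # -> 1, via 0 -(0)-> 1 -(1)-> 2 -(0)-> 4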
| 671 | 0 |
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
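# Illustrative usage (assumes a DatasetInfo obtained from huggingface_hub;
# the "squad" repo id is only an example):
#   from huggingface_hub import HfApi
#   info = HfApi().dataset_info("squad")
#   fs = HfFileSystem(repo_info=info)
#   fs.ls("")                      # top-level files/directories of the repo
#   fs.open("README.md").read()    # streams the file via fsspec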
| 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 182 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
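# Example: _flat_idx_to_idx(5, (2, 3)) == (1, 2), since 5 % 3 == 2 and
# (5 // 3) % 2 == 1 — the same row-major convention as np.unravel_index(5, (2, 3)).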
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
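# Minimal usage sketch (illustrative shapes and a toy stand-in layer): chunking
# over the two leading batch dims keeps peak activation memory proportional to
# chunk_size rather than 64 * 64.
#   layer = lambda x: {"out": x * 2}
#   inputs = {"x": torch.randn(64, 64, 16)}
#   out = chunk_layer(layer, inputs, chunk_size=256, no_batch_dims=2)
#   out["out"].shape  # torch.Size([64, 64, 16])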
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
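# Illustrative behavior of the tuner above: with max_chunk_size=512 and
# min_chunk_size=16, the candidate list is [16, 32, 64, 128, 256, 516] and the
# binary search returns the largest candidate whose trial run does not raise a
# RuntimeError (e.g. a CUDA out-of-memory error).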
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 185 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from"""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is NOT a security sandbox; untrusted
    model-generated code should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
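# Illustrative usage (names as restored above; the program string is made up):
#   program = "def add(a, b):\n    return a + b\nassert add(1, 2) == 3"
#   check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
#   # -> {"task_id": "demo/0", "passed": True, "result": "passed", "completion_id": 0}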
| 132 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
_lowercase = self.get_tokenizer()
_lowercase = self.get_feature_extractor()
_lowercase = self.get_decoder()
_lowercase = WavaVecaProcessorWithLM(tokenizer=__a ,feature_extractor=__a ,decoder=__a )
processor.save_pretrained(self.tmpdirname )
_lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,__a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,__a )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
_lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(__a ,'include' ):
WavaVecaProcessorWithLM(
tokenizer=__a ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def __UpperCAmelCase ( self : Any ) -> List[str]:
_lowercase = self.get_feature_extractor()
_lowercase = self.get_tokenizer()
_lowercase = self.get_decoder()
_lowercase = WavaVecaProcessorWithLM(tokenizer=__a ,feature_extractor=__a ,decoder=__a )
_lowercase = floats_list((3, 1000) )
_lowercase = feature_extractor(__a ,return_tensors='np' )
_lowercase = processor(__a ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __UpperCAmelCase ( self : Any ) -> int:
_lowercase = self.get_feature_extractor()
_lowercase = self.get_tokenizer()
_lowercase = self.get_decoder()
_lowercase = WavaVecaProcessorWithLM(tokenizer=__a ,feature_extractor=__a ,decoder=__a )
_lowercase = "This is a test string"
_lowercase = processor(text=__a )
_lowercase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        expected_output = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), expected_output)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 707 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
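# Note (added): each try/except block below probes one optional dependency. When
# the dependency is missing, the corresponding real classes are replaced by dummy
# objects that raise a helpful error only on use, so `import diffusers` never fails.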
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 535 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    # XNOR outputs 1 only when both inputs are equal
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
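# Equivalent bitwise formulation (added for illustration; not part of the original
# file): XNOR is the complement of XOR, so for 0/1 inputs the gate can be written as
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return (input_1 ^ input_2) ^ 1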
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 94 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
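# Recursive cross-check (added for illustration; not part of the original file).
# Every row either begins with a unit square or with a tile of length 2-4, which
# reproduces the table above for small inputs, e.g. brute_force(5) == solution(5) == 15.
def brute_force(length: int) -> int:
    if length < 0:
        return 0
    if length == 0:
        return 1
    return brute_force(length - 1) + sum(brute_force(length - tile) for tile in range(2, 5))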
if __name__ == "__main__":
print(F'''{solution() = }''')
| 122 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
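# Hedged usage sketch (added; the model id and prompt are illustrative, not from
# the original file):
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="Question: what is shown? Answer:", return_tensors="pt")
# With only `text` the call behaves like the tokenizer; with only `images`, like
# the image processor; with both, the text encoding is merged into the image batch.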
| 708 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
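# Worked example (added for illustration): for TreeNode(3, TreeNode(0), TreeNode(0))
# the root holds all three coins, so one coin must move to each empty child -
# two moves in total, and distribute_coins returns 2.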
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __snake_case :
def __init__( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ''''''
_lowerCamelCase : str = ''''''
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Tuple = 2_5_6
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : str = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Dict = 0
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = cva.imread(__lowerCAmelCase , 0 )
_lowerCamelCase : Tuple = copy.deepcopy(self.img )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' )
_lowerCamelCase : Dict = np.sum(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
_lowerCamelCase : Tuple = x[i] / self.k
self.sk += prk
_lowerCamelCase : str = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCamelCase : List[Any] = int(last % last )
_lowerCamelCase : Optional[Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCamelCase : Optional[int] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCamelCase : int = self.img[j][i]
if num != self.last_list[num]:
_lowerCamelCase : Optional[int] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase__ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 83 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
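# Shape sketch (added; the numbers are illustrative, not from the original file).
# A video batch arrives flattened as (batch * num_frames, channels, height, width),
# and attention runs over the frame axis at every spatial location:
#     model = TransformerTemporalModel(in_channels=32)
#     sample = torch.randn(2 * 4, 32, 8, 8)       # batch_size=2, num_frames=4
#     out = model(sample, num_frames=4).sample    # same shape as the input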
| 306 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 707 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
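    # Note (added): the expected tokens follow from the merge table above. Successive
    # merges "a p", "ap t</w>", "a d" and "ad apt</w>" collapse "adapt" into a single
    # token, while "react" only matches the "r e" merge and stays as "re@@ a@@ c@@ t";
    # "readapt" combines both paths, yielding "re@@ adapt".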
| 198 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 117 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores"""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
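# Note (added): the importance score accumulated above is |dL/d(mask_h)| per head,
# i.e. a first-order estimate of how much the loss changes when a head is switched
# off, following Michel et al. (2019), "Are Sixteen Heads Really Better than One?".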
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on the head importance scores
    until the score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the impact
    on parameter count, score and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
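# --- Added usage sketch (not part of the original script) --------------------
# The core API this script automates is `model.prune_heads`; the layer/head
# indices below are illustrative only:
#
#     from transformers import GPT2LMHeadModel
#     model = GPT2LMHeadModel.from_pretrained("gpt2")
#     before = sum(p.numel() for p in model.parameters())
#     model.prune_heads({0: [0, 1], 5: [3]})  # {layer_index: [head_indices]}
#     after = sum(p.numel() for p in model.parameters())
#     assert after < before  # pruned heads are physically removed from the weights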
| 117 | 1 |
"""Configuration for composite encoder-decoder models."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
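# --- Added usage sketch (assumes the standard `transformers` public API) -----
# Building a composite config from two sub-model configs; `BertConfig` is just
# an illustrative choice of encoder/decoder:
#
#     from transformers import BertConfig
#     enc = BertConfig()
#     dec = BertConfig(is_decoder=True, add_cross_attention=True)
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     assert config.is_encoder_decoder and config.decoder.is_decoder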
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
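# --- Added note (sketch, not from the original file) -------------------------
# What the `_LazyModule` pattern above buys the caller: importing the package
# stays cheap, and the torch-backed classes are only materialized on first
# attribute access, e.g.:
#
#     from transformers import MegatronBertConfig  # config only, no torch import
#     from transformers import MegatronBertModel   # triggers the lazy torch import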
| 506 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
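# --- Added usage sketch (checkpoint id is illustrative, verify before use) ----
#     from transformers import GLPNImageProcessor, GLPNForDepthEstimation
#     processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
#     model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
#     # outputs.predicted_depth then holds a dense depth map for the input image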
| 266 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort all entries of the auto mappings in `fname` alphabetically; optionally write the result back."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Run `sort_auto_mapping` on every python file of the auto module."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
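# --- Added self-check (illustrative strings, not from the original file) -----
# Shows concretely what the two regexes match: `_re_intro_mapping` finds the
# start of a mapping and `_re_identifier` extracts the sort key of an entry.
assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
assert _re_identifier.search('        ("albert", "AlbertModel"),').groups()[0] == "albert"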
| 266 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is serialized even when left at its default value
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
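# --- Added usage sketch (file name is hypothetical) ---------------------------
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]
#     ds = ds.prepare_for_task(LanguageModeling())  # aligns columns via column_mapping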
| 719 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments pertaining to the training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training a pre-trained model on a downstream task.

    Args:
      model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models.
      train_file: A csv or a json file containing the training data.
      infer_file: A csv or a json file containing the data to predict on.
      output_dir: The output directory where the model predictions and checkpoints will be written.
      **kwargs: Dict of optional arguments.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
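# --- Added driver sketch (all file names are made up) -------------------------
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="train.csv",
#         infer_file="unlabeled.csv",
#         output_dir="./self-training-output",
#         eval_file="eval.csv",
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=5,
#     )
# Each iteration writes `self-train_iter-<i>/` with stage-1 (and optional
# stage-2) checkpoints plus the pseudo-labeled file for the next round.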
| 597 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
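# --- Added worked example of the default geometry above (not original code) ---
# image_size=30, patch_size=2  ->  num_patches = (30 // 2) ** 2 = 225
# mask_ratio=0.6               ->  seq_length  = ceil(0.4 * (225 + 1)) = 91
# i.e. the encoder only sees the ~40% of patch tokens left unmasked (+ [CLS]).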
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 484 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # a flattened 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
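# --- Added back-of-envelope check of the defaults (not original code) ---------
# ImageGPT quantizes pixels into 512 color clusters plus one start-of-sequence
# token, hence vocab_size = 512 + 1, and flattens a 32x32 image into a sequence
# of 32 * 32 = 1024 positions (n_positions).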
| 484 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer (one token per amino-acid residue)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>",
        eos_token="<eos>", **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
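# --- Added usage sketch (checkpoint name taken from the map above) ------------
#     tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     enc = tok("MKTAYIAKQR")  # one token per residue, wrapped as <cls> ... <eos>
#     tok.convert_ids_to_tokens(enc["input_ids"])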
| 275 | """simple docstring"""
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
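# --- Added worked example for the 220 assertion above (not original code) -----
# capacity 50 with (weight, value) pairs (10, 60), (20, 100), (30, 120):
# taking the last two items uses weight 20 + 30 = 50 and yields 100 + 120 = 220,
# which beats any combination that includes the (10, 60) item.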
| 275 | 1 |
"""simple docstring"""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized integer equation with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 76 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax checkpoint pickle."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
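# --- Added example invocation (paths are placeholders) ------------------------
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path ./reformer_trax.pkl \
#         --config_file ./reformer_config.json \
#         --pytorch_dump_path ./pytorch_model.bin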
| 674 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
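# --- Added concrete instance of the asserted pattern (not original code) ------
#     hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
# returns
#     "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"
# (`quote` percent-encodes the space in the file name).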
| 233 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare image and/or text inputs for a Pix2Struct model."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
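# --- Added usage sketch (checkpoint id is illustrative, verify before use) ----
#     from transformers import Pix2StructProcessor
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#     inputs = processor(images=image, text="What is shown?", return_tensors="pt")
#     # non-VQA checkpoints return pixel_values plus decoder_input_ids / decoder_attention_mask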
| 233 | 1 |
def solution(n: int = 100) -> int:
    """Difference between the sum of the cubes and the sum of the squares of the first n natural numbers."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 326 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
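# --- Added worked example (classic test vector; verify before relying on it) --
#     encrypt_message(8, "Common sense is not so common.")
#     -> "Cenoonommstmme oo snnio. s s c"
# Column c of the ciphertext collects characters at positions c, c+8, c+16, ...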
| 413 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
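# --- Added migration sketch implied by the warning (checkpoint is illustrative) --
#     old: PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
#     new: PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")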
| 715 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =(3, 3_2, 1_2_8)
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
__A =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__A =dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
__A ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 3_2, '''width''': 1_2_8},
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowercase__ , lowercase__ )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
'''simple docstring'''
        __A =np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )
__A =Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
__A =self.get_image_processor()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
__A =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
__A =self.get_image_processor()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor.save_pretrained(self.tmpdirname )
__A =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__A =self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
__A =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =self.prepare_image_inputs()
__A =image_processor(lowercase__ , return_tensors='''np''' )
__A =processor(images=lowercase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A ='''test'''
__A =processor(text=lowercase__ )
__A =tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A ='''test'''
__A =self.prepare_image_inputs()
__A =processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase__ ):
processor()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__A =processor.char_decode(lowercase__ )
__A =tokenizer.batch_decode(lowercase__ )
__A =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =None
__A =self.prepare_image_inputs()
__A =processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_image_processor()
__A =self.get_tokenizer()
__A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__A =torch.randn(1 , 2_7 , 3_8 )
__A =torch.randn(1 , 2_7 , 5_0_2_5_7 )
__A =torch.randn(1 , 2_7 , 3_0_5_2_2 )
__A =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 516 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''

    def __init__( self : Any , cache_dir : Optional[str] = None ) ->Any:
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path( self : Tuple , path : str ) ->str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def _do_extract( self : Optional[int] , output_path : str , force_extract : bool ) ->bool:
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def extract( self : str , input_path : str , force_extract : bool = False ) ->str:
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    '''simple docstring'''

    @classmethod
    @abstractmethod
    def is_extractable( cls : Dict , path : Union[Path, str] , **kwargs : Optional[int] ) ->bool:
        ...

    @staticmethod
    @abstractmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        ...


class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    '''simple docstring'''

    magic_numbers = []

    @staticmethod
    def read_magic_number( path : Union[Path, str] , magic_number_length : int ) ->List[str]:
        with open(path , """rb""" ) as f:
            return f.read(magic_number_length )

    @classmethod
    def is_extractable( cls : str , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor ):
    '''simple docstring'''

    @classmethod
    def is_extractable( cls : Tuple , path : Union[Path, str] , **kwargs : Any ) ->bool:
        return tarfile.is_tarfile(path )

    @staticmethod
    def safemembers( members : Any , trusted_dir : str ) ->Union[str, Any]:
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info : Any , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(trusted_dir )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable( cls : Tuple , path : Union[Path, str] , magic_number : bytes = b"" ) ->bool:
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , """rb""" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) ->None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame

        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    '''simple docstring'''

    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length( cls : List[str] ) ->Any:
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _read_magic_number( path : Union[Path, str] , magic_number_length : int ) ->Dict:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def is_extractable( cls : str , path : Union[Path, str] , return_extractor : bool = False ) ->bool:
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format( cls : int , path : Union[Path, str] ) ->str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def extract( cls : Dict , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) ->None:
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
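# A small usage sketch appended for illustration (paths are temporary placeholders):
# format inference reads the first few bytes and matches them against the magic
# numbers declared by each extractor above.
if __name__ == "__main__":
    import tempfile

    file_path = os.path.join(tempfile.mkdtemp() , "data.txt.gz" )
    with gzip.open(file_path , "wb" ) as f:
        f.write(b"hello" )
    # the gzip magic number b"\x1F\x8B" identifies the format
    assert Extractor.infer_extractor_format(file_path ) == "gzip"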
| 278 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Dict = logging.get_logger(__name__)
def get_config(model_name: str ) -> BitConfig:
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = """std_conv""" if """bit""" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name: str ) -> str:
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "head.fc" in name:
        name = name.replace("""head.fc""" , """classifier.1""" )
    if name.startswith("""norm""" ):
        name = """bit.""" + name
    if "bit" not in name and "classifier" not in name:
        name = """bit.encoder.""" + name
    return name
def prepare_img() -> Image.Image:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ) -> None:
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if """head""" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("""Logits:""" , logits[0, :3] )
    print("""Predicted class:""" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
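    # Example invocation (illustrative only; the model name must exist in timm and
    # the output path is a placeholder):
    #
    #   python convert_bit_to_pytorch.py \
    #       --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./bit-50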
| 278 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]
    def __init__( self : Dict , *args : int , **kwargs : List[Any] ):
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
    def forward( self : Optional[Any] , inputs : List[Any] ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode( self : List[str] , outputs : List[Any] ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
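# A usage sketch appended for illustration (downloads the checkpoint on first use;
# "photo.jpg" is a placeholder path):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))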
| 598 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    def get_model_optimizer( self : Dict , resolution : Union[str, Any]=3_2 ):
        """simple docstring"""
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.00_01 )
        return model, optimizer
@slow
    def test_training_step_equality( self : Optional[int] ):
        """simple docstring"""
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 3_2, 3_2) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=3_2 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=3_2 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
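# An illustrative sketch (added; not part of the original test): `add_noise`
# implements the closed-form forward process
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# reproduced by hand below for a single timestep.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1_0_0_0 )
    sample = torch.randn(1 , 3 , 8 , 8 )
    eps = torch.randn(1 , 3 , 8 , 8 )
    t = torch.tensor([1_0] )
    alpha_bar = scheduler.alphas_cumprod[t]
    expected = alpha_bar.sqrt() * sample + (1 - alpha_bar).sqrt() * eps
    assert torch.allclose(scheduler.add_noise(sample , eps , t ) , expected , atol=1E-5 )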
| 598 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ) -> None:
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
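# A worked example of the rule under test (added for illustration): with
# IN_MEMORY_MAX_SIZE set to 500 MiB, a 400 MiB dataset is "small" (it fits in
# memory) while a 600 MiB one is not:
#
#   datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20
#   is_small_dataset(400 * 2**20)  # True
#   is_small_dataset(600 * 2**20)  # False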
| 205 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    model_type = '''mobilenet_v2'''
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.0_2 , layer_norm_eps=0.0_0_1 , semantic_loss_ignore_index=255 , **kwargs , ) -> Dict:
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1e-4
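# A usage sketch appended for illustration (assumes a working transformers install):
if __name__ == "__main__":
    config = MobileNetV2Config(depth_multiplier=1.4 )
    assert config.model_type == "mobilenet_v2"
    assert config.depth_multiplier == 1.4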
| 205 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self :Optional[int] , num_attention_heads :int = 32 , attention_head_dim :int = 64 , num_layers :int = 20 , embedding_dim :int = 768 , num_embeddings :Union[str, Any]=77 , additional_embeddings :List[Any]=4 , dropout :float = 0.0 , time_embed_act_fn :str = "silu" , norm_in_type :Optional[str] = None , embedding_proj_norm_type :Optional[str] = None , encoder_hid_proj_type :Optional[str] = "linear" , added_emb_type :Optional[str] = "prd" , time_embed_dim :Optional[int] = None , embedding_proj_dim :Optional[int] = None , clip_embed_dim :Optional[int] = None , ):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(clip_embed_dim , inner_dim )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn='gelu' , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self :List[Any] ) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name :str , module :torch.nn.Module , processors :Dict[str, AttentionProcessor] ):
            if hasattr(module , 'set_processor' ):
                processors[f'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self :List[str] , processor :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(name :str , module :torch.nn.Module , processor :Optional[Any] ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self :Any ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
    def forward( self :Optional[int] , hidden_states :Tuple , timestep :Union[torch.Tensor, float, int] , proj_embedding :torch.FloatTensor , encoder_hidden_states :Optional[torch.FloatTensor] = None , attention_mask :Optional[torch.BoolTensor] = None , return_dict :bool = True , ):
        '''simple docstring'''
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self :Dict , prior_latents :Tuple ):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
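# An illustrative sketch (added): the causal mask registered in `__init__` fills a
# square matrix with -10000 and keeps only the strict upper triangle, so position i
# cannot attend to any later position j > i once the mask is added to the scores.
if __name__ == "__main__":
    mask = torch.full([4, 4] , -10_000.0 )
    mask.triu_(1 )
    assert mask[0, 1] == -10_000.0  # future position: blocked
    assert mask[1, 0] == 0.0  # past position: visible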
| 713 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray , 3 ).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def A_ ( __lowercase = "digital_image_processing/image_data/lena_small.jpg" ):
UpperCamelCase_ : Optional[Any] =bs.Burkes(imread(__lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def A_ ( __lowercase = "digital_image_processing/image_data/lena_small.jpg" , ):
UpperCamelCase_ : Optional[Any] =rs.NearestNeighbour(imread(__lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any()
| 395 | 0 |
'''simple docstring'''
def base16_encode(data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )


def base16_decode(data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
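    # A quick round-trip check (an added example):
    assert base16_encode(b"Hello World!" ) == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421" ) == b"Hello World!"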
| 497 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ) -> Any:
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__( self : int ):
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ) -> Optional[Any]:
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def A__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A_ , A_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowercase = dataset["train"].features["labels"].names
_lowercase , _lowercase = {}, {}
for i, label in enumerate(A_ ):
_lowercase = str(A_ )
_lowercase = label
# Load the accuracy metric from the datasets package
_lowercase = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A_ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(A_ ) , labelaid=A_ , idalabel=A_ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_lowercase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_lowercase = image_processor.size["shortest_edge"]
else:
_lowercase = (image_processor.size["height"], image_processor.size["width"])
_lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_lowercase = Compose(
[
RandomResizedCrop(A_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_lowercase = Compose(
[
Resize(A_ ),
CenterCrop(A_ ),
ToTensor(),
normalize,
] )
def train_transforms(A_ ):
_lowercase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A_ ):
_lowercase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_lowercase = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(A_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 497 | 1 |
"""simple docstring"""
def lowercase_ ( _lowercase : List[str] , _lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : List[str] = len(UpperCamelCase__ ) + 1
UpperCAmelCase : Any = len(UpperCamelCase__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase : Dict = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
# since string of zero length match pattern of zero length
UpperCAmelCase : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase : Dict = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , UpperCamelCase__ ):
UpperCAmelCase : List[str] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase__ ):
for j in range(1 , UpperCamelCase__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase : Optional[int] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase : Optional[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase : List[str] = dp[i - 1][j]
else:
UpperCAmelCase : Optional[int] = 0
else:
UpperCAmelCase : str = 0
return bool(dp[-1][-1] )
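

# A few illustrative checks (not part of the original module): '.' matches any
# single character and '*' means "zero or more of the preceding element".
assert match_pattern("aab", "c*a*b")  # "c*" matches nothing, "a*" matches "aa"
assert match_pattern("abc", "a.c")  # "." matches "b"
assert not match_pattern("abc", "a*c")  # nothing can consume the "b"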
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 703 |
"""simple docstring"""
def lowercase_ ( _lowercase : list , _lowercase : int , _lowercase : int = 0 , _lowercase : int = 0 ):
'''simple docstring'''
UpperCAmelCase : Tuple = right or len(_lowercase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(_lowercase , _lowercase , left + 1 , right - 1 )
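

# Illustrative calls (hypothetical data, not from the original module): the
# search window shrinks from both ends, so at most ~n/2 recursive calls occur.
assert search([1, 2, 3, 4, 5], 3) == 2
assert search([1, 2, 3, 4, 5], 9) == -1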
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word equals the node prefix, so mark the node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
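

# Quick illustration (hypothetical words, not in the tests below):
#     root = RadixNode()
#     root.insert_many(["test", "team"])
#     root.print_tree()
# prints "- te", "-- st  (leaf)", "-- am  (leaf)": the shared prefix "te" is
# split into one internal node, which is the path compression that
# distinguishes a radix tree from a plain trie.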
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Show how the radix tree lays out a small vocabulary."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 15 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output: restore the original (batch * frames, channels, h, w) layout
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
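

# Hedged usage sketch (shapes only; every value here is hypothetical):
#     model = TransformerTemporalModel(in_channels=32)
#     x = torch.randn(2 * 8, 32, 16, 16)      # batch=2, frames=8
#     out = model(x, num_frames=8).sample     # same shape as x
# The module folds the frame axis out of the batch, runs self-attention over
# the 8 frames at every spatial location, then restores the original layout.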
| 491 | 0 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the function returns from inside the loop
def pre_order(node: TreeNode) -> None:
    """Root -> left -> right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left -> root -> right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left -> right -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal that prints one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Center ``s`` in a banner of ``width`` characters."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 433 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n whose fill count first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
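

# This appears to be the Project Euler 115 fill-count function F(m, n): blocks
# of length at least m (50 by default) are placed in a row of length n with at
# least one empty cell between blocks, and solution() returns the least n for
# which the number of distinct fillings first exceeds one million.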
if __name__ == "__main__":
print(f"""{solution() = }""")
| 433 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet2DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_guided_video_to_video(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1_024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 672 |
"""Square roots via the Newton-Raphson method."""
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting guess >= sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
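

# Worked illustration: for a = 9 the initial point is 16 (2 -> 4 -> 16), and
# one Newton step gives 16 - (16**2 - 9) / (2 * 16) = 8.28125, after which the
# iteration converges to 3.0 well within the default tolerance.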
if __name__ == "__main__":
from doctest import testmod
testmod()
| 672 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1E-12, onnx_export: bool = False, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
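

# Minimal usage sketch (assumed, not from this file): the attention window may
# be a single int shared by all layers or a per-layer list, e.g.
#     config = LongformerConfig(attention_window=[256] * 12, num_hidden_layers=12)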
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        # the opset needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs | 716 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrap ``tqdm.auto.tqdm``, optionally showing the bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar everywhere except on the local main process.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable) | 649 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
| 32 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---') + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path):
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str):
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding='utf-8', ).decode('utf-8')
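

# Round-trip sketch (hypothetical strings): keys listed in _FIELDS_WITH_DASHES
# are stored with underscores in Python but serialized with dashes in YAML.
#     meta = DatasetMetadata.from_yaml_string("train-eval-index: []\nlicense: mit")
#     meta["train_eval_index"]  # -> []
#     meta.to_yaml_string()     # -> "train-eval-index: []\nlicense: mit\n"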
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=3_7)
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 1 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    r"""mT5 reuses the T5 architecture; only the pretraining data differs."""
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 647 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader that builds a dataset from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
| 647 | 1 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # Pytest exits with code 5 when no tests are collected; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 700 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
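# A minimal sketch (not from the original file) of how the fixture above is
# typically consumed in a test; resolving a local script directory is standard
# `datasets.load_dataset` behaviour:
#
#     def test_dummy_dataset_loads(dataset_loading_script_dir):
#         from datasets import load_dataset
#         dataset = load_dataset(dataset_loading_script_dir, split="train")
#         assert "tokens" in dataset.column_names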
| 491 | 0 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first-search based topological sort on the graph above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 6 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
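# A minimal usage sketch (not part of the original file); these helpers are
# normally driven from an `accelerate` test script, roughly as follows:
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     train_dataloader, eval_dataloader = mocked_dataloaders(accelerator, batch_size=16)
#     model = RegressionModel(a=1, b=2)
#     model, train_dataloader = accelerator.prepare(model, train_dataloader)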
| 304 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 524 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 524 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 368 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
| 368 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance in metres between two points on Earth."""
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
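# A short usage sketch (the coordinates below are illustrative assumptions,
# not from the original file):
#
#     >>> SAN_FRANCISCO = (37.774856, -122.424227)
#     >>> YOSEMITE = (37.864742, -119.537521)
#     >>> f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km"  # roughly 254 km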
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder: projects a text embedding into a prefix and decodes it with a GPT-2 language-model head.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate token sequences for the given text embedding features via beam search."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
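# A minimal instantiation sketch (the dimensions and token id below are assumed
# values for illustration, not from the original file):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#     tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")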
| 100 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 69 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        # We test on the dev set to compare to benchmarks without facing test set submission or private test data
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
| 223 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 707 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def lowercase_ ( self :int ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''num_encoder_blocks''' ) )
class __SCREAMING_SNAKE_CASE :
def __init__( self :Union[str, Any] ,__UpperCAmelCase :str ,__UpperCAmelCase :Optional[int]=13 ,__UpperCAmelCase :Any=64 ,__UpperCAmelCase :List[Any]=3 ,__UpperCAmelCase :str=4 ,__UpperCAmelCase :Any=[2, 2, 2, 2] ,__UpperCAmelCase :List[Any]=[8, 4, 2, 1] ,__UpperCAmelCase :Dict=[16, 32, 64, 1_28] ,__UpperCAmelCase :Any=[1, 4, 8, 16] ,__UpperCAmelCase :int=[1, 2, 4, 8] ,__UpperCAmelCase :Union[str, Any]=True ,__UpperCAmelCase :Dict=True ,__UpperCAmelCase :Optional[int]="gelu" ,__UpperCAmelCase :Tuple=0.1 ,__UpperCAmelCase :List[Any]=0.1 ,__UpperCAmelCase :List[Any]=0.02 ,__UpperCAmelCase :str=3 ,__UpperCAmelCase :Tuple=None ,) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : Tuple = num_channels
lowerCamelCase__ : Union[str, Any] = num_encoder_blocks
lowerCamelCase__ : Union[str, Any] = sr_ratios
lowerCamelCase__ : int = depths
lowerCamelCase__ : Optional[Any] = hidden_sizes
lowerCamelCase__ : List[Any] = downsampling_rates
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Any = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # the encoder downsamples the input by downsampling_rates[-1] * 2 overall
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
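
# The tester above builds a tiny random config/input pair; the test case below routes it
# through every supported SegFormer head (base model, semantic segmentation, classification).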
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 121 | 0 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 598 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
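
    # A processor bundles a tokenizer and an image processor behind one save/load surface;
    # the round-trip tests above check that both halves survive save_pretrained/from_pretrained.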
| 598 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
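
# _LazyModule defers the framework-specific imports registered above until an attribute is
# first accessed, so importing this subpackage stays cheap when torch/TF/flax are absent.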
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
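
# Example: because of `attribute_map` above, reading `config.hidden_size` transparently
# returns `config.d_model`, so generic code that expects `hidden_size` keeps working.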
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 546 | """Project Euler problem 48: https://projecteuler.net/problem=48 (self powers)."""


def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
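
# Note: solution() relies on Python's arbitrary-precision integers; 1000**1000 alone has
# 3001 digits, so no modular-arithmetic trick is needed to keep the last ten digits exact.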
| 546 | 1 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects the marked key handlers of a class into a dispatch table.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
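
# Minimal usage sketch (the `BulletMenu` class and the "up" key are illustrative assumptions,
# not part of this module):
#
#   @register
#   class BulletMenu:
#       @mark(KEYMAP["up"])
#       def go_up(cls):
#           ...
#
# Pressing a marked key inside `BulletMenu.handle_input()` then dispatches to the tagged
# method through the `key_handler` table that the metaclass filled in.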
| 706 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
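
# As in the PyTorch test suites, the helper above owns the tiny config; the test case below
# only wires it into the shared TFModelTesterMixin machinery.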
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _lowerCAmelCase ( self ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self ) -> int:
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 114 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
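
# The subclass only emits the FutureWarning above and otherwise behaves exactly like
# DeiTImageProcessor; new code should construct DeiTImageProcessor directly.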
| 104 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
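
# DeepFloyd IF is a cascade: a 64x64 base pipeline followed by super-resolution stages. The
# img2img and inpainting variants are built from the same weights by re-wrapping
# `pipe.components`, which is what the slow end-to-end test below exercises.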
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, original_image=original_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, num_inference_steps=2,
            generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, original_image=original_image,
            generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
def decimal_to_fraction(decimal):
    """
    Return a given decimal value as its simplest fraction (numerator, denominator).
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce by the greatest common divisor (Euclid's algorithm)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
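
# Worked example: 1.5 has one fractional digit, giving 15/10; Euclid's algorithm finds
# gcd(15, 10) = 5, so the reduced result is (3, 2).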
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 133 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
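
# Example: solution(10) -> 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.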
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 53 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 447 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 721 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer: every UTF-8 byte is one token, plus pad/eos/unk
    and `extra_ids` sentinel tokens."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        # one token per byte of the UTF-8 encoding
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return () | 285 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first letter of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word; returns
        (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word equals the node prefix
        # Solution: We mark the node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
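# A minimal sketch of the structural sharing (assumes the RadixNode class above):
# inserting "band" and "bandana" stores the shared prefix once and splits an edge.
_demo_root = RadixNode()
_demo_root.insert_many(["band", "bandana"])
assert _demo_root.find("band") and _demo_root.find("bandana")
assert not _demo_root.find("ban")  # internal prefix, never inserted as a word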
if __name__ == "__main__":
main()
| 659 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( lowerCamelCase_):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
a__ = grid[0]
for row_n in range(1 , len(lowerCamelCase_)):
a__ = grid[row_n]
a__ = fill_row(lowerCamelCase_ , lowerCamelCase_)
a__ = grid[row_n]
return grid[-1][-1]
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_):
current_row[0] += row_above[0]
for cell_n in range(1 , len(lowerCamelCase_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
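# Worked example (illustrative): the cheapest right/down path through this grid
# is 1 -> 3 -> 1 -> 1 -> 1, giving a minimum path sum of 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7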
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 |
"""simple docstring"""
__a : List[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
__a : Any = frozenset([])
__a : Union[str, Any] = frozenset(['image'])
__a : Dict = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__a : Dict = frozenset(['image'])
__a : Dict = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Optional[Any] = frozenset(['prompt', 'image', 'negative_prompt'])
__a : List[Any] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__a : Optional[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : int = frozenset(['image', 'mask_image'])
__a : Tuple = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : Optional[Any] = frozenset(['example_image', 'image', 'mask_image'])
__a : Optional[Any] = frozenset(['class_labels'])
__a : Tuple = frozenset(['class_labels'])
__a : int = frozenset(['batch_size'])
__a : int = frozenset([])
__a : Union[str, Any] = frozenset(['batch_size'])
__a : Tuple = frozenset([])
__a : Dict = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Dict = frozenset(['prompt', 'negative_prompt'])
__a : Optional[int] = frozenset(['input_tokens'])
__a : str = frozenset(['input_tokens'])
| 200 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
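# A hedged usage sketch (assumes a PIL image `pil_image`): the processor never
# pads or crops; it only snaps height and width down to multiples of size_divisor.
#     processor = GLPNImageProcessor(size_divisor=32)
#     inputs = processor(images=pil_image, return_tensors="pt")
#     inputs["pixel_values"].shape  # (1, 3, H', W') with H' and W' divisible by 32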
| 318 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 112 |
"""simple docstring"""
def __lowercase ( lowerCamelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set({"(", "[", "{"} )
SCREAMING_SNAKE_CASE__ = set({")", "]", "}"} )
SCREAMING_SNAKE_CASE__ = {"{": "}", "[": "]", "(": ")"}
for i in range(len(lowerCamelCase_ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase_ ) == 0 or (len(lowerCamelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase_ ) == 0
def __lowercase ( ):
SCREAMING_SNAKE_CASE__ = input("Enter sequence of brackets: " )
if is_balanced(lowerCamelCase_ ):
print(lowerCamelCase_ , "is balanced" )
else:
print(lowerCamelCase_ , "is not balanced" )
if __name__ == "__main__":
main()
| 112 | 1 |
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions; the parameter names feed the query string via locals()."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Forecast for a location, from the `forecast` endpoint."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call summary for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location: ").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
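# A hedged usage sketch: with a valid appid each helper returns parsed JSON;
# per the OpenWeatherMap documentation the current temperature (Kelvin) lives at:
#     data = current_weather("Copenhagen")
#     data["main"]["temp"]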
| 627 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
if config_path is not None:
UpperCAmelCase__ : Any = BlipConfig.from_pretrained(__lowerCamelCase )
else:
UpperCAmelCase__ : str = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
UpperCAmelCase__ : int = BlipForConditionalGeneration(__lowerCamelCase ).eval()
UpperCAmelCase__ : Any = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
UpperCAmelCase__ : List[str] = blip_decoder(pretrained=__lowerCamelCase , image_size=384 , vit="""base""" )
UpperCAmelCase__ : Union[str, Any] = pt_model.eval()
UpperCAmelCase__ : Optional[int] = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ : Dict = modified_state_dict.pop(__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = rename_key(__lowerCamelCase )
UpperCAmelCase__ : List[str] = value
hf_model.load_state_dict(__lowerCamelCase )
UpperCAmelCase__ : Tuple = 384
UpperCAmelCase__ : str = load_demo_image(image_size=__lowerCamelCase , device="""cpu""" )
UpperCAmelCase__ : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
UpperCAmelCase__ : Dict = tokenizer(["""a picture of"""] ).input_ids
UpperCAmelCase__ : int = hf_model.generate(__lowerCamelCase , __lowerCamelCase )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
UpperCAmelCase__ : Any = hf_model.generate(__lowerCamelCase )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__lowerCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase__ : Union[str, Any] = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
UpperCAmelCase__ : List[Any] = blip_vqa(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit="""base""" )
vqa_model.eval()
UpperCAmelCase__ : str = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ : Dict = modified_state_dict.pop(__lowerCamelCase )
UpperCAmelCase__ : Dict = rename_key(__lowerCamelCase )
UpperCAmelCase__ : int = value
UpperCAmelCase__ : List[str] = BlipForQuestionAnswering(__lowerCamelCase )
hf_vqa_model.load_state_dict(__lowerCamelCase )
UpperCAmelCase__ : Tuple = ["""How many dogs are in this image?"""]
UpperCAmelCase__ : Union[str, Any] = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
UpperCAmelCase__ : Optional[Any] = hf_vqa_model.generate(__lowerCamelCase , __lowerCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
UpperCAmelCase__ : int = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
UpperCAmelCase__ : Any = blip_itm(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit="""base""" )
itm_model.eval()
UpperCAmelCase__ : List[Any] = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase__ : Dict = modified_state_dict.pop(__lowerCamelCase )
UpperCAmelCase__ : int = rename_key(__lowerCamelCase )
UpperCAmelCase__ : Any = value
UpperCAmelCase__ : Optional[int] = BlipForImageTextRetrieval(__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = ["""A picture of a woman with a dog sitting in a beach"""]
UpperCAmelCase__ : List[Any] = tokenizer(
__lowerCamelCase , return_tensors="""pt""" , padding="""max_length""" , truncation=__lowerCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__lowerCamelCase )
hf_itm_model.eval()
UpperCAmelCase__ : List[str] = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase )
UpperCAmelCase__ : List[str] = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 79 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise
    # download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 427 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 427 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)] )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Optional[Any] = GenerationConfig(
do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase )
__A : Optional[int] = GenerationConfig.from_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Dict = AutoConfig.from_pretrained("gpt2" )
__A : List[str] = GenerationConfig.from_model_config(__UpperCAmelCase )
__A : Tuple = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __UpperCAmelCase( self ):
__A : Tuple = GenerationConfig()
__A : str = {
"max_new_tokens": 1_024,
"foo": "bar",
}
__A : Union[str, Any] = copy.deepcopy(__UpperCAmelCase )
__A : Dict = generation_config.update(**__UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__UpperCAmelCase , {"foo": "bar"} )
def __UpperCAmelCase( self ):
__A : List[str] = GenerationConfig()
__A : List[str] = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__UpperCAmelCase )
__A : List[Any] = GenerationConfig.from_pretrained(__UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
__A : List[Any] = GenerationConfig.from_model_config(__UpperCAmelCase )
assert not hasattr(__UpperCAmelCase , "foo" ) # no new kwargs should be initialized if from config
def __UpperCAmelCase( self ):
__A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
__A : List[Any] = GenerationConfig(
do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase )
__A : List[str] = GenerationConfig.from_pretrained(__UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase( cls ):
__A : Tuple = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def __UpperCAmelCase( cls ):
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def __UpperCAmelCase( self ):
__A : int = GenerationConfig(
do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
__A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCAmelCase , repo_id="test-generation-config" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__A : List[str] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def __UpperCAmelCase( self ):
__A : Optional[Any] = GenerationConfig(
do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
__A : Any = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__UpperCAmelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__A : str = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
| 520 | class CircularQueue:
    """Fixed-capacity FIFO queue implemented as a ring buffer over a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
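# A short usage sketch for the ring buffer above; enqueue returns self, so
# calls can be chained, and slots freed at the front are reused when rear wraps.
queue = CircularQueue(3)
queue.enqueue(10).enqueue(20).enqueue(30)
assert len(queue) == 3 and queue.first() == 10
assert queue.dequeue() == 10
queue.enqueue(40)  # reuses the slot freed at the front
assert queue.dequeue() == 20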
| 520 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
UpperCAmelCase = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCAmelCase = current + previous, current
return current
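# Worked example (illustrative): with steps of 1 or 2 the counts follow the
# Fibonacci sequence, so climb_stairs(4) == 5 (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2).
assert climb_stairs(4) == 5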
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 378 | 0 |
def solution(length: int = 50) -> int:
    """Project Euler 117: count the ways to tile a row of `length` unit squares
    with grey unit squares and coloured tiles of length two, three and four."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Place the first coloured tile at tile_start; the remainder of
                # the row can then be tiled in ways_number[...] ways.
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
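# Worked example (illustrative): Project Euler 117 states that a row measuring
# five units can be tiled in exactly fifteen ways, so solution(5) == 15.
assert solution(5) == 15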
if __name__ == "__main__":
print(F'''{solution() = }''')
| 477 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 477 | 1 |
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first n natural numbers. The square of the sum equals
    the sum of cubes, hence the variable name."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
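# Worked example (illustrative): for the first ten natural numbers the square
# of the sum is 3025 and the sum of the squares is 385, so solution(10) == 2640.
assert solution(10) == 2640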
if __name__ == "__main__":
print(f'''{solution() = }''')
| 110 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
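        # Structural sanity sketch (added, not part of the original test): the
        # Jukebox tokenizer emits one id tensor per prior level, so `input_ids`
        # always has length 3, and the two upper conditioning levels share the
        # same short prefix shape seen in the expected output above.
        assert len(tokens) == 3
        assert tokens[1].shape == tokens[2].shape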
| 110 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Return the numerator of the largest proper fraction strictly left of
    numerator/denominator whose denominator does not exceed limit
    (Project Euler problem 71).
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # compare the fractions with cross-multiplication to avoid float error
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
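# Cross-check on the small Project Euler example (added; the Fraction brute
# force is an independent oracle, not part of the original solution): for
# denominators up to 8 the fraction immediately left of 3/7 is 2/5.
from fractions import Fraction

assert solution(3, 7, 8) == 2
assert max(
    Fraction(n, d) for d in range(1, 9) for n in range(1, d) if Fraction(n, d) < Fraction(3, 7)
) == Fraction(2, 5)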
| 307 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_real_features=0,
        num_static_categorical_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
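# Illustrative check of the derived sizes (added, not in the original file):
# with the defaults above, _number_of_features reduces to num_time_features +
# input_size * 2, so a config with two time features has feature_size
# 1 * 7 (lags) + 4 = 11.
config = InformerConfig(prediction_length=24, num_time_features=2)
assert config.feature_size == 11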
| 307 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias))

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias))

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias))

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias))


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias))


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
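    # Illustrative invocation (added; the paths are placeholders, not from the
    # original script):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl \
    #       --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin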
| 720 |
from manim import *
class Stage1(Scene):
    def construct(self):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 0 |
"""simple docstring"""
def lowercase ( ):
"""simple docstring"""
return 1
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase )
def lowercase ( UpperCamelCase : int = 200 ):
"""simple docstring"""
return two_pound(UpperCamelCase )
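# Equivalent bottom-up count (added for comparison; `solution_dp` is not part
# of the original file). The classic coin-change DP over the 8 UK coins gives
# the known Project Euler 31 answer of 73682 ways to make £2.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]


assert solution_dp(200) == 73682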
if __name__ == "__main__":
print(solution(int(input().strip())))
| 656 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
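# Quick sanity checks (added; the bound argument is the standard one):
# 7 * 9**5 = 413343 has only six digits, so no number with seven or more
# digits can equal its digit fifth-power sum, and searching below 10**6
# above is sufficient.
assert digits_fifth_powers_sum(4150) == 4150  # 4^5 + 1^5 + 5^5 + 0^5
assert 7 * 9**5 < 10**6 * 10  # 413343 is far below the smallest 7-digit number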
if __name__ == "__main__":
print(solution())
| 656 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # normal_gradient = gradient of the normal at the reflection point
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
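# Geometry sanity sketch (added, not in the original): every point returned by
# next_point must stay on the ellipse 4x^2 + y^2 = 100, since it solves the
# ellipse/line system above.
def on_ellipse(x: float, y: float, tol: float = 1e-6) -> bool:
    return abs(4 * x * x + y * y - 100) < tol


_x, _y, _gradient = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert on_ellipse(_x, _y)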
if __name__ == "__main__":
print(f"""{solution() = }""")
| 713 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle decomposition: factor a square matrix into a unit lower
    # triangular matrix and an upper triangular matrix.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
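    # Usage sketch (added): factor a matrix and confirm L @ U reproduces it.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)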
| 272 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer, which operates directly on raw UTF-8 bytes."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory, filename_prefix=None):
return () | 30 |
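# Round-trip sketch (added; ByT5 needs no vocab file, so the tokenizer can be
# built locally): byte values are shifted by the 3 leading special tokens.
tok = ByT5Tokenizer()
ids = tok("hi").input_ids
assert ids == [ord("h") + 3, ord("i") + 3, tok.eos_token_id]
assert tok.decode(ids, skip_special_tokens=True) == "hi"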
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592 | 0 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Create a state space tree and iterate through each branch using DFS;
    recursion terminates when the end of the sequence is reached."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
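# Cross-check against the standard library (added): the backtracking above
# visits exactly the n! orderings that itertools.permutations produces, so a
# 4-element input yields 24 printed permutations.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24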
| 160 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
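# Quick check of the rounding rule above (added): the helper returns *latent*
# sizes — the image side is rounded up to a multiple of scale_factor**2 (64
# here) and divided by the movq downscale factor of 8, so a 768px side maps
# to a 96-unit latent side.
assert get_new_h_w(768, 768) == (96, 96)
assert get_new_h_w(500, 500) == (64, 64)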
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder, tokenizer, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
lowerCAmelCase__ : List[str] = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else 1
# get prompt text embeddings
lowerCAmelCase__ : Any = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,truncation=__UpperCAmelCase ,max_length=77 ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = text_inputs.input_ids
lowerCAmelCase__ : Optional[int] = self.tokenizer(__UpperCAmelCase ,padding="""longest""" ,return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase__ : List[Any] = text_input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = text_inputs.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = prompt_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[int] = text_encoder_hidden_states.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Union[str, Any] = text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : List[str]
if negative_prompt is None:
lowerCAmelCase__ : Tuple = [""""""] * batch_size
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="""
F""" {type(__UpperCAmelCase )}.""" )
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCAmelCase__ : int = negative_prompt
lowerCAmelCase__ : List[str] = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,max_length=77 ,truncation=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Dict = uncond_input.input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = uncond_input.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : int = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
lowerCAmelCase__ : Optional[Any] = negative_prompt_embeds.repeat(1 ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.shape[1]
lowerCAmelCase__ : Tuple = uncond_text_encoder_hidden_states.repeat(1 ,__UpperCAmelCase ,1 )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,__UpperCAmelCase ,-1 )
lowerCAmelCase__ : List[str] = uncond_text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCAmelCase__ : Any = torch.device(F"""cuda:{gpu_id}""" )
lowerCAmelCase__ : Union[str, Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase ,__UpperCAmelCase )
    def enable_model_cpu_offload(self, gpu_id=0):
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCAmelCase__ : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase__ : Optional[int] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = cpu_offload_with_hook(__UpperCAmelCase ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
if self.safety_checker is not None:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = cpu_offload_with_hook(self.safety_checker ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
lowerCAmelCase__ : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt, image_embeds, negative_image_embeds, negative_prompt=None, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = 1
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
lowerCAmelCase__ : str = self._execution_device
lowerCAmelCase__ : List[Any] = batch_size * num_images_per_prompt
lowerCAmelCase__ : int = guidance_scale > 1.0
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = self._encode_prompt(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = torch.cat(__UpperCAmelCase ,dim=0 )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = torch.cat(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[Any] = negative_image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=__UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase ,device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps
lowerCAmelCase__ : Dict = self.unet.config.in_channels
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_new_h_w(__UpperCAmelCase ,__UpperCAmelCase ,self.movq_scale_factor )
# create initial latent
lowerCAmelCase__ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ : Optional[Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCAmelCase__ : Any = self.unet(
sample=__UpperCAmelCase ,timestep=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,added_cond_kwargs=__UpperCAmelCase ,return_dict=__UpperCAmelCase ,)[0]
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = noise_pred.chunk(2 )
lowerCAmelCase__ , lowerCAmelCase__ : int = variance_pred.chunk(2 )
lowerCAmelCase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase__ : str = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : Optional[int] = self.scheduler.step(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,generator=__UpperCAmelCase ,).prev_sample
# post-processing
lowerCAmelCase__ : int = self.movq.decode(__UpperCAmelCase ,force_not_quantize=__UpperCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCAmelCase__ : Tuple = image * 0.5 + 0.5
lowerCAmelCase__ : str = image.clamp(0 ,1 )
lowerCAmelCase__ : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ : int = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 160 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg is popped for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
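# Usage sketch (added): rope_scaling must carry exactly a type in
# {"linear", "dynamic"} and a float factor > 1, or validation raises.
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0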
| 74 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
return bbox | 86 | 0 |
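# Usage sketch (added; the model id is illustrative but real — OWL-ViT ships
# a zero-shot detection checkpoint). Kept as a comment to avoid a download at
# import time:
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cats.png", candidate_labels=["cat", "remote control"], threshold=0.1)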
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            if cnt == math.pow(2, layer) - 1:
                layer = layer - 1
                if layer == 0:
                    output += "\n*************************************"
                    return output
                output += "\n"
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t)) | 531 |
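# A small, hedged sanity check for the AVL tree above (my addition, not part of
# the original snippet; it assumes the MyNode/AVLtree definitions in this file):
# inserting sorted keys must not degrade the tree into a linked list.
def _check_balance() -> None:
    tree = AVLtree()
    for key in range(15):
        tree.insert(key)
    # A plain BST would reach height 15 here; an AVL tree stays at 4-5 levels.
    assert tree.get_height() <= 5, tree.get_height()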
import numpy as np
import datasets
UpperCAmelCase = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
UpperCAmelCase = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
UpperCAmelCase = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist} | 531 | 1 |
def solution() -> int:
    """Return how many Sundays fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 98 |
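# Hedged cross-check using only the standard library: count the month-starts
# that were Sundays from Jan 1901 through Dec 2000. It should agree with
# solution() above (both give 171, the known Project Euler 19 answer).
import datetime


def solution_datetime() -> int:
    return sum(
        datetime.date(year, month, 1).weekday() == 6  # Monday == 0 ... Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )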
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 164 | 0 |
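# Hedged sketch of the slow generation test above, runnable outside the test
# harness (it downloads the `openai-gpt` checkpoint; greedy decoding mirrors
# the test's do_sample=False).
def demo_generate() -> str:
    from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = tokenizer("the president is", return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, do_sample=False)
    return tokenizer.decode(output_ids[0])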
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 263 |
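# Hedged illustration of the _LazyModule pattern used above: nothing under a
# heavy submodule is imported until one of its attributes is first touched,
# which keeps the top-level `import` cheap. Minimal stand-in, not the real API:
import importlib


class TinyLazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        # attribute name -> submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)  # the import happens only on first access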
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def UpperCAmelCase ( A__: list[int] , A__: list[int] , A__: int ) -> list[int]:
__lowerCamelCase : List[Any] = [0] * no_of_processes
__lowerCamelCase : Any = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(A__ ):
__lowerCamelCase : Dict = burst_time[i]
__lowerCamelCase : Any = 0
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Union[str, Any] = 999999999
__lowerCamelCase : str = 0
__lowerCamelCase : Optional[int] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(A__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__lowerCamelCase : List[Any] = remaining_time[j]
__lowerCamelCase : List[str] = j
__lowerCamelCase : Optional[int] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__lowerCamelCase : Optional[Any] = remaining_time[short]
if minm == 0:
__lowerCamelCase : Optional[int] = 999999999
if remaining_time[short] == 0:
complete += 1
__lowerCamelCase : List[str] = False
# Find finish time of current process
__lowerCamelCase : Dict = increment_time + 1
# Calculate waiting time
__lowerCamelCase : Any = finish_time - arrival_time[short]
__lowerCamelCase : Dict = finar - burst_time[short]
if waiting_time[short] < 0:
__lowerCamelCase : Optional[Any] = 0
# Increment time
increment_time += 1
return waiting_time
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
a_ : int = int(input())
a_ : List[str] = [0] * no_of_processes
a_ : int = [0] * no_of_processes
a_ : int = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a_ , a_ : Union[str, Any] = map(int, input().split())
a_ : Any = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a_ : List[str] = burst_time
a_ : List[Any] = no_of_processes
a_ : Tuple = waiting_time
a_ : Optional[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a_ : List[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 263 | 1 |
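# Hedged worked example for the scheduler above (non-interactive, so it can be
# unit-tested): three processes arriving at t=0,1,2 with bursts 3,1,2. P1 runs
# one tick, is preempted by the shorter P2, resumes, and P3 runs last.
def _srtf_example() -> None:
    wait = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
    tat = calculate_turnaroundtime([3, 1, 2], 3, wait)
    assert wait == [1, 0, 2] and tat == [4, 1, 4]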
"""simple docstring"""
from math import ceil
def lowercase ( lowerCAmelCase__ = 1_001 ):
lowerCamelCase_ = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
lowerCamelCase_ = 2 * i + 1
lowerCamelCase_ = 2 * i
lowerCamelCase_ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 29 |
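# Hedged brute-force check of the closed form above: walk the spiral's corners
# directly. Ring i has corners spaced 2i apart ending at (2i+1)**2, so its four
# corners sum to 4*odd**2 - 6*even, which is exactly the term in solution().
def solution_bruteforce(n: int = 1001) -> int:
    total = 1
    value = 1
    for step in range(2, n, 2):  # rings of width 3, 5, ..., n
        for _ in range(4):  # four corners per ring
            value += step
            total += value
    return total


# solution() == solution_bruteforce() == 669171001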
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 1 |
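# Hedged usage note (the script file name below is a placeholder, not from the
# original source): the converter above is meant to be invoked along the lines of
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned
# after which ./mbart-hf loads via MBartForConditionalGeneration.from_pretrained.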
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128,
        shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224,
        num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 604 |
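# Hedged sketch of wiring the ONNX config above to a real processor (downloads
# the layoutlmv3-base processor; apply_ocr=False matches what
# generate_dummy_inputs sets, so words and boxes are passed in directly).
def build_dummy_onnx_inputs():
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="default")
    return onnx_config.generate_dummy_inputs(processor, batch_size=-1, seq_length=-1)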
def kth_permutation(k, n):
    """
    Find the k-th lexicographic permutation (0-indexed) of 0, 1, ..., n-1
    in O(n^2) time using the factorial number system.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 604 | 1 |
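# Hedged cross-check against the standard library: the factorial-number-system
# walk above must agree with brute-force enumeration for small n.
from itertools import permutations


def kth_permutation_bruteforce(k: int, n: int) -> list[int]:
    return list(sorted(permutations(range(n)))[k])


# kth_permutation(10, 4) == kth_permutation_bruteforce(10, 4) == [1, 3, 0, 2]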