| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase : List[Any] = deprecated_arg[3:]
setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
super().__init__(**__lowercase )
a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
a : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
__UpperCAmelCase : str = torch.device("""cpu""" )
__UpperCAmelCase : int = 0
elif is_torch_tpu_available():
__UpperCAmelCase : Tuple = xm.xla_device()
__UpperCAmelCase : int = 0
else:
__UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__UpperCAmelCase : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self : List[str] ) -> int:
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self : int ) -> "torch.device":
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return self.n_gpu > 0
| 63 |
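As written, the `__init__` in the row above cannot run: the signature binds `**__lowercase` while the body reads `kwargs`, and `positive_arg` is never assigned. A minimal sketch of the deprecated-flag inversion it implements, with invented flag names and a plain class standing in for the dataclass:

```python
import logging

logger = logging.getLogger(__name__)

class BenchmarkArgsSketch:
    # Hypothetical positive flags; the real class defines these as dataclass fields.
    inference = True
    cuda = True
    deprecated_args = ["no_inference", "no_cuda"]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # "no_inference" -> "inference"
                # A truthy old negated flag turns the positive flag off, and vice versa.
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use "
                    f"{positive_arg}={getattr(self, positive_arg)} instead."
                )

args = BenchmarkArgsSketch(no_inference=True)
assert args.inference is False  # no_inference=True means inference is disabled
```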
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
with contextlib.closing(sqlite3.connect(__lowerCamelCase ) ) as con:
__UpperCAmelCase : Dict = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 | 1 |
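The tests above round-trip data through SQLite with `SqlDatasetReader`/`SqlDatasetWriter` and compare rows via `iter_sql_file`. The same round-trip idea, reduced to the standard-library `sqlite3` module so it runs without the `datasets` package (table and column names are invented):

```python
import contextlib
import sqlite3

rows = [("foo", 1, 0.5), ("bar", 2, 1.5)]

with contextlib.closing(sqlite3.connect(":memory:")) as con:
    con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
    con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
    # Read everything back, mirroring the iter_sql_file helper above.
    read_back = list(con.execute("SELECT * FROM dataset"))

assert read_back == rows  # the round trip preserves every row
```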
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechT5HifiGan,
SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : List[Any] = AudioLDMPipeline
a : Optional[Any] = TEXT_TO_AUDIO_PARAMS
a : Dict = TEXT_TO_AUDIO_BATCH_PARAMS
a : Optional[int] = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def UpperCAmelCase ( self : Any ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowercase , )
__UpperCAmelCase : Optional[int] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
__UpperCAmelCase : Optional[int] = ClapTextModelWithProjection(__lowercase )
__UpperCAmelCase : str = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
__UpperCAmelCase : Dict = SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowercase , )
__UpperCAmelCase : int = SpeechT5HifiGan(__lowercase )
__UpperCAmelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def UpperCAmelCase ( self : Optional[int] , __lowercase : Any , __lowercase : str=0 ) -> List[str]:
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : Dict = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Tuple = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Tuple = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : List[Any] = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(**__lowercase )
__UpperCAmelCase : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 256
__UpperCAmelCase : str = audio[:10]
__UpperCAmelCase : List[Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
__UpperCAmelCase : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Dict = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(**__lowercase )
__UpperCAmelCase : int = output.audios[0]
__UpperCAmelCase : List[str] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Any = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : Tuple = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Optional[Any] = text_inputs["""input_ids"""].to(__lowercase )
__UpperCAmelCase : int = audioldm_pipe.text_encoder(
__lowercase , )
__UpperCAmelCase : Dict = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Tuple = F.normalize(__lowercase , dim=-1 )
__UpperCAmelCase : Tuple = prompt_embeds
# forward
__UpperCAmelCase : Dict = audioldm_pipe(**__lowercase )
__UpperCAmelCase : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
__UpperCAmelCase : int = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Optional[Any] = negative_prompt
__UpperCAmelCase : Tuple = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : int = audioldm_pipe(**__lowercase )
__UpperCAmelCase : Any = output.audios[0]
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : List[Any] = []
for p in [prompt, negative_prompt]:
__UpperCAmelCase : List[str] = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Union[str, Any] = text_inputs["""input_ids"""].to(__lowercase )
__UpperCAmelCase : Optional[Any] = audioldm_pipe.text_encoder(
__lowercase , )
__UpperCAmelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Any = F.normalize(__lowercase , dim=-1 )
embeds.append(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = embeds
# forward
__UpperCAmelCase : str = audioldm_pipe(**__lowercase )
__UpperCAmelCase : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : Tuple = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = """egg cracking"""
__UpperCAmelCase : Optional[Any] = audioldm_pipe(**__lowercase , negative_prompt=__lowercase )
__UpperCAmelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 256
__UpperCAmelCase : Union[str, Any] = audio[:10]
__UpperCAmelCase : int = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : str ) -> Any:
__UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : Tuple = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : str = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(__lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : int = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__UpperCAmelCase : int = 2
__UpperCAmelCase : str = audioldm_pipe(__lowercase , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__UpperCAmelCase : Any = 2
__UpperCAmelCase : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase ( self : List[str] ) -> str:
__UpperCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : int = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = audioldm_pipe.vocoder.config.sampling_rate
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 , **__lowercase )
__UpperCAmelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.016
__UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **__lowercase )
__UpperCAmelCase : Dict = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = ["""hey"""]
__UpperCAmelCase : Dict = audioldm_pipe(__lowercase , num_inference_steps=1 )
__UpperCAmelCase : Tuple = output.audios.shape
assert audio_shape == (1, 256)
__UpperCAmelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__UpperCAmelCase : List[Any] = SpeechT5HifiGan(__lowercase ).to(__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe(__lowercase , num_inference_steps=1 )
__UpperCAmelCase : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase )
def UpperCAmelCase ( self : str ) -> Any:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowercase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase )
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Dict ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : int="cpu" , __lowercase : List[Any]=torch.float32 , __lowercase : Tuple=0 ) -> Dict:
__UpperCAmelCase : int = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Dict = np.random.RandomState(__lowercase ).standard_normal((1, 8, 128, 16) )
__UpperCAmelCase : Optional[Any] = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__UpperCAmelCase : int = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def UpperCAmelCase ( self : int ) -> List[str]:
__UpperCAmelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Tuple = self.get_inputs(__lowercase )
__UpperCAmelCase : str = 25
__UpperCAmelCase : Optional[int] = audioldm_pipe(**__lowercase ).audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 81920
__UpperCAmelCase : Dict = audio[77230:77240]
__UpperCAmelCase : Optional[Any] = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
__UpperCAmelCase : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : Optional[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__UpperCAmelCase : int = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = self.get_inputs(__lowercase )
__UpperCAmelCase : Optional[int] = audioldm_pipe(**__lowercase ).audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 81920
__UpperCAmelCase : int = audio[27780:27790]
__UpperCAmelCase : Optional[Any] = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
__UpperCAmelCase : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 63 |
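Throughout these tests, randomness is pinned with a device-scoped `torch.Generator`, falling back to the global seed on MPS, where per-device generators were not supported. A small sketch of that determinism pattern:

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device.startswith("mps"):
        # MPS lacked per-device generators, hence the global-seed fallback.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

g1 = make_generator("cpu", seed=0)
g2 = make_generator("cpu", seed=0)
# Identical seeds on the same device yield identical draws.
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))
```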
from __future__ import annotations
a : Optional[Any] = [True] * 1_000_001
a : Union[str, Any] = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
a : Optional[Any] = False
i += 1
def lowerCamelCase__ ( __lowerCamelCase : int ):
return seive[n]
def lowerCamelCase__ ( __lowerCamelCase : int ):
return any(digit in """02468""" for digit in str(__lowerCamelCase ) )
def lowerCamelCase__ ( __lowerCamelCase : int = 1000000 ):
__UpperCAmelCase : Optional[Any] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__lowerCamelCase ) and not contains_an_even_digit(__lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
__UpperCAmelCase : List[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__lowerCamelCase ) )]
if all(is_prime(__lowerCamelCase ) for i in list_nums ):
result.append(__lowerCamelCase )
return result
def lowerCamelCase__ ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 63 | 1 |
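The identifier obfuscation in the row above drops the bindings (`seive`, `result`, `str_num` and the helper names are read but never assigned as written). A runnable reconstruction, inferred from the surviving references; it solves the same problem, counting circular primes below one million:

```python
from __future__ import annotations

LIMIT = 1_000_000
sieve = [True] * (LIMIT + 1)
i = 2
while i * i <= LIMIT:
    if sieve[i]:
        for j in range(i * i, LIMIT + 1, i):
            sieve[j] = False
    i += 1

def is_prime(n: int) -> bool:
    return sieve[n]

def contains_an_even_digit(n: int) -> bool:
    # A rotation of a number with an even digit ends in that digit, so the
    # number cannot be a circular prime (2 itself is handled separately).
    return any(digit in "02468" for digit in str(n))

def find_circular_primes(limit: int = LIMIT) -> list[int]:
    result = [2]  # result already includes the number 2
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            rotations = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(r) for r in rotations):
                result.append(num)
    return result

print(f"{len(find_circular_primes()) = }")  # 55 below one million
```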
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 63 |
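This row is diffusers' guard pattern for optional backends: probe availability, raise a sentinel exception, and import dummy stand-ins on failure. The bare shape of that pattern, self-contained (the scheduler name is only a placeholder here; the real dummy objects are auto-generated):

```python
import importlib.util

def is_backend_available(name: str) -> bool:
    # find_spec probes importability without actually importing the package.
    return importlib.util.find_spec(name) is not None

class OptionalDependencyNotAvailable(ImportError):
    pass

try:
    if not is_backend_available("scipy"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class LMSDiscreteScheduler:  # dummy: fails loudly on use, not on import
        def __init__(self, *args, **kwargs):
            raise ImportError("LMSDiscreteScheduler requires `scipy`.")
else:
    class LMSDiscreteScheduler:
        """Stand-in for the real scipy-backed import."""
```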
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : Tuple = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase : Union[str, Any] = k.replace(__lowerCamelCase , __lowerCamelCase )
if k.startswith("""encoder""" ):
__UpperCAmelCase : List[str] = k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase : Optional[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase : Union[str, Any] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase : Optional[int] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase : List[Any] = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase : Any = k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Optional[Any] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase : Dict = sd.pop(__lowerCamelCase )
__UpperCAmelCase : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase : List[str] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = torch.load(__lowerCamelCase , map_location="""cpu""" )
__UpperCAmelCase : Tuple = model["""model"""]
__UpperCAmelCase : int = BlenderbotConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
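The heart of the conversion script above is mechanical substring renaming over checkpoint keys. A dependency-free sketch of that technique with invented patterns and keys; replacements apply in list order, so overlapping patterns must be ordered deliberately:

```python
# Rename checkpoint keys by ordered substring substitution, as
# rename_state_dict_key does above. Patterns and keys are invented.
PATTERNS = [
    ("q_lin", "q_proj"),
    ("out_lin", "out_proj"),
    ("norm_embeddings", "layernorm_embedding"),
]

def rename_key(key: str) -> str:
    for old, new in PATTERNS:
        key = key.replace(old, new)
    return key

old_sd = {"encoder.q_lin.weight": 1, "decoder.out_lin.bias": 2}
new_sd = {rename_key(k): v for k, v in old_sd.items()}

assert new_sd == {"encoder.q_proj.weight": 1, "decoder.out_proj.bias": 2}
```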
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
__UpperCAmelCase : str = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
__UpperCAmelCase : Any = True if """large""" in model_name or """huge""" in model_name else False
__UpperCAmelCase : int = True if """large""" in model_name or """huge""" in model_name else False
__UpperCAmelCase : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 3, 3]
__UpperCAmelCase : Union[str, Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
__UpperCAmelCase : str = [4, 4, 4, 4]
__UpperCAmelCase : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__UpperCAmelCase : Dict = [3, 3, 3, 3]
if "lrf" in model_name:
__UpperCAmelCase : Optional[Any] = [3, 3, 3, 3]
else:
__UpperCAmelCase : Optional[int] = [2, 2, 2, 2]
if "tiny" in model_name:
__UpperCAmelCase : List[str] = 96
elif "small" in model_name:
__UpperCAmelCase : Dict = 96
elif "base" in model_name:
__UpperCAmelCase : List[Any] = 128
elif "large" in model_name:
__UpperCAmelCase : Any = 192
elif "xlarge" in model_name:
__UpperCAmelCase : Tuple = 256
elif "huge" in model_name:
__UpperCAmelCase : int = 352
# set label information
__UpperCAmelCase : Tuple = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
__UpperCAmelCase : Any = """imagenet-22k-id2label.json"""
else:
__UpperCAmelCase : Dict = """imagenet-1k-id2label.json"""
__UpperCAmelCase : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase : Optional[int] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : List[str] = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , id2label=__lowerCamelCase , label2id=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
if "patch_embed.proj" in name:
__UpperCAmelCase : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__UpperCAmelCase : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__UpperCAmelCase : int = """encoder.""" + name
if "encoder.layers" in name:
__UpperCAmelCase : Optional[int] = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
__UpperCAmelCase : Optional[Any] = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
__UpperCAmelCase : Union[str, Any] = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__UpperCAmelCase : List[str] = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__UpperCAmelCase : List[Any] = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__UpperCAmelCase : str = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
__UpperCAmelCase : Optional[Any] = """layernorm.weight"""
if name == "norm.bias":
__UpperCAmelCase : Dict = """layernorm.bias"""
if "head" in name:
__UpperCAmelCase : Tuple = name.replace("""head""" , """classifier""" )
else:
__UpperCAmelCase : int = """focalnet.""" + name
return name
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=False ):
# fmt: off
__UpperCAmelCase : Dict = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
__UpperCAmelCase : int = model_name_to_url[model_name]
print("""Checkpoint URL: """ , __lowerCamelCase )
__UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""model"""]
# rename keys
for key in state_dict.copy().keys():
__UpperCAmelCase : Tuple = state_dict.pop(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = val
__UpperCAmelCase : Optional[Any] = get_focalnet_config(__lowerCamelCase )
__UpperCAmelCase : Any = FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
__UpperCAmelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase : Union[str, Any] = BitImageProcessor(
do_resize=__lowerCamelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
__UpperCAmelCase : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
__UpperCAmelCase : List[Any] = processor(images=__lowerCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : str = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__UpperCAmelCase : List[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 )
__UpperCAmelCase : Dict = model(**__lowerCamelCase )
__UpperCAmelCase : Any = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__UpperCAmelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__UpperCAmelCase : int = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__UpperCAmelCase : int = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__UpperCAmelCase : Optional[int] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__UpperCAmelCase : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
a : Any = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 63 |
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : List[str] = len(__lowerCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__UpperCAmelCase : Union[str, Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCamelCase ):
return None
__UpperCAmelCase : str = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__UpperCAmelCase : Optional[Any] = left
__UpperCAmelCase : Tuple = point
elif point > right:
__UpperCAmelCase : Optional[Any] = right
__UpperCAmelCase : Dict = point
else:
if item < current_item:
__UpperCAmelCase : Union[str, Any] = point - 1
else:
__UpperCAmelCase : str = point + 1
return None
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__UpperCAmelCase : str = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
elif point > right:
return interpolation_search_by_recursion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
__lowerCamelCase , __lowerCamelCase , point + 1 , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : int ):
if collection != sorted(__lowerCamelCase ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
a : Tuple = 67
a : List[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 63 | 1 |
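As in the other rows, the bindings here are garbled (`left`, `right`, `point`, `current_item` are read but never assigned under those names). A runnable reconstruction of the iterative variant, inferred from the surviving references and the demo values at the bottom of the row:

```python
def interpolation_search(sorted_collection, item):
    """Return an index of item in an ascending-sorted list, or None."""
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # Avoid dividing by zero during interpolation.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        # Probe proportionally to where item should sit in the value range.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):  # out of range
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None

collection = [10, 30, 40, 45, 50, 66, 77, 93]
assert interpolation_search(collection, 66) == 5
assert interpolation_search(collection, 67) is None  # the row's own demo target
```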
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Any = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'van'
def __init__( self : Union[str, Any] , __lowercase : int=224 , __lowercase : List[Any]=3 , __lowercase : List[str]=[7, 3, 3, 3] , __lowercase : Tuple=[4, 2, 2, 2] , __lowercase : Any=[64, 128, 320, 512] , __lowercase : str=[3, 3, 12, 3] , __lowercase : Tuple=[8, 8, 4, 4] , __lowercase : Union[str, Any]="gelu" , __lowercase : Optional[int]=0.02 , __lowercase : Union[str, Any]=1e-6 , __lowercase : Tuple=1e-2 , __lowercase : int=0.0 , __lowercase : Any=0.0 , **__lowercase : Any , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : Dict = num_channels
__UpperCAmelCase : List[str] = patch_sizes
__UpperCAmelCase : Tuple = strides
__UpperCAmelCase : List[Any] = hidden_sizes
__UpperCAmelCase : Tuple = depths
__UpperCAmelCase : Optional[int] = mlp_ratios
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = layer_scale_init_value
__UpperCAmelCase : Optional[int] = drop_path_rate
__UpperCAmelCase : Optional[int] = dropout_rate
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a : Optional[int] = "hf-internal-testing/tiny-random-bert"
a : Union[str, Any] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
a : Optional[int] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : str = cached_file(__lowercase , __lowercase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowercase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowercase , __lowercase ) ) )
with open(os.path.join(__lowercase , """refs""" , """main""" ) ) as f:
__UpperCAmelCase : List[str] = f.read()
self.assertEqual(__lowercase , os.path.join(__lowercase , """snapshots""" , __lowercase , __lowercase ) )
self.assertTrue(os.path.isfile(__lowercase ) )
# File is cached at the same place the second time.
__UpperCAmelCase : Tuple = cached_file(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
# Using a specific revision to test the full commit hash.
__UpperCAmelCase : Union[str, Any] = cached_file(__lowercase , __lowercase , revision="""9b8c223""" )
self.assertEqual(__lowercase , os.path.join(__lowercase , """snapshots""" , __lowercase , __lowercase ) )
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
with self.assertRaisesRegex(__lowercase , """is not a valid model identifier""" ):
__UpperCAmelCase : Optional[int] = cached_file("""tiny-random-bert""" , __lowercase )
with self.assertRaisesRegex(__lowercase , """is not a valid git identifier""" ):
__UpperCAmelCase : Tuple = cached_file(__lowercase , __lowercase , revision="""aaaa""" )
with self.assertRaisesRegex(__lowercase , """does not appear to have a file named""" ):
__UpperCAmelCase : Optional[int] = cached_file(__lowercase , """conf""" )
def UpperCAmelCase ( self : Any ) -> List[Any]:
with self.assertRaisesRegex(__lowercase , """does not appear to have a file named""" ):
__UpperCAmelCase : Union[str, Any] = cached_file(__lowercase , """conf""" )
with open(os.path.join(__lowercase , """refs""" , """main""" ) ) as f:
__UpperCAmelCase : Any = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowercase , """.no_exist""" , __lowercase , """conf""" ) ) )
__UpperCAmelCase : List[Any] = cached_file(__lowercase , """conf""" , _raise_exceptions_for_missing_entries=__lowercase )
self.assertIsNone(__lowercase )
__UpperCAmelCase : Optional[int] = cached_file(__lowercase , """conf""" , local_files_only=__lowercase , _raise_exceptions_for_missing_entries=__lowercase )
self.assertIsNone(__lowercase )
__UpperCAmelCase : Dict = mock.Mock()
__UpperCAmelCase : Optional[int] = 500
__UpperCAmelCase : int = {}
__UpperCAmelCase : Optional[int] = HTTPError
__UpperCAmelCase : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowercase ) as mock_head:
__UpperCAmelCase : Dict = cached_file(__lowercase , """conf""" , _raise_exceptions_for_connection_errors=__lowercase )
self.assertIsNone(__lowercase )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : List[str] ) -> int:
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
def UpperCAmelCase ( self : str ) -> List[str]:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowercase , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , __lowercase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowercase , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , __lowercase , revision="""ahaha""" )
__UpperCAmelCase : Union[str, Any] = get_file_from_repo("""bert-base-cased""" , __lowercase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__UpperCAmelCase : Optional[int] = json.loads(open(__lowercase , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def UpperCAmelCase ( self : List[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : Optional[int] = Path(__lowercase ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(__lowercase , """a.txt""" ) , str(__lowercase ) )
self.assertIsNone(get_file_from_repo(__lowercase , """b.txt""" ) )
| 63 |
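The 500-error test above fabricates a response object and patches `requests.Session.request` so no network traffic happens. The mocking skeleton on its own, applied to a trivial caller (the URL and helper are invented):

```python
import unittest.mock as mock

import requests

def fetch_status(url: str) -> int:
    # Stand-in for library code that performs an HTTP call internally.
    return requests.Session().request("HEAD", url).status_code

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = requests.exceptions.HTTPError
response_mock.json.return_value = {}

# Every Session.request inside the block now returns the fake response.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    assert fetch_status("https://example.invalid/model") == 500
mock_head.assert_called()  # proves the patched request was actually hit
```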
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Dict = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
__UpperCAmelCase : int = num_inference_steps
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
__UpperCAmelCase : Tuple = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__UpperCAmelCase : Dict = timesteps.to(__lowercase )
__UpperCAmelCase : Optional[Any] = []
def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : Optional[Any] = timestep_index + 1
__UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowercase )
if len(self.ets ) == 1:
__UpperCAmelCase : Tuple = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = self.alphas[timestep_index]
__UpperCAmelCase : Tuple = self.betas[timestep_index]
__UpperCAmelCase : Any = self.alphas[prev_timestep_index]
__UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
__UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
__UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
| 63 | 1 |
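The `len(self.ets)` branches in `step` are Adams-Bashforth multistep rules of increasing order; the final branch is the classic fourth-order weights (55, -59, 37, -9)/24 over the last four derivative estimates. A quick numeric check that those weights integrate an ODE far more accurately than a one-step rule (plain Euler for contrast; the test problem is invented):

```python
import math

def ab4_step(y, fs, h):
    # Fourth-order Adams-Bashforth: the same (55, -59, 37, -9) / 24 weights as
    # the len(self.ets) == 4 branch above, applied to the last four derivatives.
    return y + h * (55 * fs[-1] - 59 * fs[-2] + 37 * fs[-3] - 9 * fs[-4]) / 24

# Integrate y' = cos(t) with y(0) = 0; the exact solution is sin(t).
h, steps = 0.01, 400
fs = [math.cos(k * h) for k in range(steps + 1)]

y_ab4 = math.sin(3 * h)  # bootstrap the first three steps from the exact solution
for k in range(3, steps):
    y_ab4 = ab4_step(y_ab4, fs[: k + 1], h)

y_euler = sum(h * fs[k] for k in range(steps))  # first-order reference

exact = math.sin(steps * h)
assert abs(y_ab4 - exact) < abs(y_euler - exact) / 100  # AB4 wins by orders of magnitude
```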
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Dict = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
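`_LazyModule` defers the heavy framework imports until an attribute is first touched. Outside transformers, PEP 562's module-level `__getattr__` gives the same effect; a minimal sketch for a made-up package layout (this would live in `mypkg/__init__.py`):

```python
# mypkg/__init__.py -- defer importing heavy submodules until first use.
import importlib
from typing import Any

_import_structure = {
    "configuration": ["Config"],
    "modeling": ["Model"],
}

# Invert the mapping: exported name -> submodule that provides it.
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name: str) -> Any:  # PEP 562: called only on missing attributes
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With this in place, `import mypkg` stays cheap; `mypkg.Model` triggers the real submodule import on first access.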
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
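# Each command registered above follows the same two-hook protocol: a static
# `register_subcommand(parser)` that wires its flags and sets `func`, and a
# `run()` method that main() invokes. A minimal hypothetical subcommand in that
# shape (illustrative only; `HelloCommand` is not part of the real CLI):
class HelloCommand:
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.add_argument("--name", type=str, default="world")
        # main() looks up `args.func` to build the command instance
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")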
| 63 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'linear'
a : int = 'cosine'
a : Optional[Any] = 'cosine_with_restarts'
a : Dict = 'polynomial'
a : Tuple = 'constant'
a : Dict = 'constant_with_warmup'
a : Any = 'piecewise_constant'
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int = -1 ):
return LambdaLR(__lowerCamelCase , lambda __lowerCamelCase : 1 , last_epoch=__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1.0 , __lowerCamelCase ) )
return 1.0
return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : str , __lowerCamelCase : int = -1 ):
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : Tuple = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = rule_str.split(""":""" )
__UpperCAmelCase : Any = int(__lowerCamelCase )
__UpperCAmelCase : List[str] = float(__lowerCamelCase )
__UpperCAmelCase : int = value
__UpperCAmelCase : Any = float(rule_list[-1] )
def create_rules_function(__lowerCamelCase : Dict , __lowerCamelCase : List[Any] ):
        def rule_func(steps : int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase : str = create_rules_function(__lowerCamelCase , __lowerCamelCase )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=-1 ):
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0.5 , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : Dict ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
__UpperCAmelCase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=1E-7 , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : int=-1 ):
__UpperCAmelCase : Tuple = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase : Optional[Any] = lr_init - lr_end
__UpperCAmelCase : Union[str, Any] = num_training_steps - num_warmup_steps
__UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( __lowerCamelCase : Union[str, SchedulerType] , __lowerCamelCase : Optimizer , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : int = -1 , ):
__UpperCAmelCase : Union[str, Any] = SchedulerType(__lowerCamelCase )
__UpperCAmelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowerCamelCase , last_epoch=__lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowerCamelCase , step_rules=__lowerCamelCase , last_epoch=__lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowerCamelCase , num_warmup_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , num_cycles=__lowerCamelCase , last_epoch=__lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , power=__lowerCamelCase , last_epoch=__lowerCamelCase , )
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
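# Usage sketch (an illustration; the factory directly above is exposed as
# `get_scheduler` in the upstream library): warm up linearly for 100 steps,
# then decay the learning rate linearly to zero over the remaining 900.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(1000):
    optimizer.step()
    scheduler.step()  # advances the multiplicative LR factor one step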
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
import unittest
from knapsack import knapsack as k
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : str = [0]
__UpperCAmelCase : Union[str, Any] = [0]
__UpperCAmelCase : Optional[int] = len(__lowercase )
self.assertEqual(k.knapsack(__lowercase , __lowercase , __lowercase , __lowercase ) , 0 )
__UpperCAmelCase : str = [60]
__UpperCAmelCase : Optional[int] = [10]
__UpperCAmelCase : Tuple = len(__lowercase )
self.assertEqual(k.knapsack(__lowercase , __lowercase , __lowercase , __lowercase ) , 0 )
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : Any = [1, 2, 3]
__UpperCAmelCase : Dict = [3, 2, 1]
__UpperCAmelCase : Optional[Any] = len(__lowercase )
self.assertEqual(k.knapsack(__lowercase , __lowercase , __lowercase , __lowercase ) , 5 )
def UpperCAmelCase ( self : int ) -> str:
__UpperCAmelCase : int = 50
__UpperCAmelCase : Tuple = [60, 100, 120]
__UpperCAmelCase : int = [10, 20, 30]
__UpperCAmelCase : str = len(__lowercase )
self.assertEqual(k.knapsack(__lowercase , __lowercase , __lowercase , __lowercase ) , 220 )
if __name__ == "__main__":
unittest.main()
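# The tests above import `knapsack` from a sibling module. A minimal recursive
# 0/1 knapsack matching the call signature and expected outputs
# (capacity, weights, values, number_of_items) -- a sketch, not necessarily
# the exact module under test:
def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # item doesn't fit: skip it
        return knapsack(capacity, weights, values, counter - 1)
    # otherwise take the better of including vs. excluding the item
    included = values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1)
    excluded = knapsack(capacity, weights, values, counter - 1)
    return max(included, excluded)


assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the third test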
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase__ )
class a ( lowercase__ ):
"""simple docstring"""
a : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
a : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
a : ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
a : str = "question"
a : str = "context"
a : str = "answers"
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 63 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__UpperCAmelCase : int = [True] * (num + 1)
__UpperCAmelCase : Tuple = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCamelCase ):
__UpperCAmelCase : str = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
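    # Quick sanity checks (an addition; the module enables doctest above but
    # the sieve itself ships no doctests):
    #   prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]
    #   prime_sieve_eratosthenes(2)  -> [2]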
| 63 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
a : Tuple = 6_378_137.0
a : Any = 6_356_752.314_245
a : Tuple = 6_378_137
def lowerCamelCase__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ):
__UpperCAmelCase : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A
__UpperCAmelCase : List[str] = atan((1 - flattening) * tan(radians(__lowerCamelCase ) ) )
__UpperCAmelCase : Optional[Any] = atan((1 - flattening) * tan(radians(__lowerCamelCase ) ) )
__UpperCAmelCase : Optional[Any] = radians(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = radians(__lowerCamelCase )
# Equation
__UpperCAmelCase : Any = sin((phi_a - phi_a) / 2 )
__UpperCAmelCase : Any = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCAmelCase : List[Any] = sqrt(sin_sq_phi + (cos(__lowerCamelCase ) * cos(__lowerCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
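    # Example (an addition; the distance function above takes lat1, lon1,
    # lat2, lon2 in degrees and returns meters): San Francisco
    # (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) comes out
    # to roughly 254 km -- noticeably different from a plain spherical
    # haversine because of the flattening correction applied above.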
| 63 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
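# Usage sketch (an assumption: the two classes above correspond to
# transformers' GitVisionConfig and GitConfig, as the "git_vision_model" and
# "git" model_type strings suggest):
#   vision = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision.to_dict(), vocab_size=30522)
#   config.to_dict()["vision_config"]["image_size"]  # -> 224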
| 63 | 1 |
class a :
"""simple docstring"""
def __init__( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = {}
def UpperCAmelCase ( self : str ) -> None:
print(self.vertex )
for i in self.vertex:
print(__lowercase , """ -> """ , """ -> """.join([str(__lowercase ) for j in self.vertex[i]] ) )
def UpperCAmelCase ( self : Tuple , __lowercase : int , __lowercase : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowercase )
else:
# else make a new vertex
__UpperCAmelCase : Optional[int] = [to_vertex]
def UpperCAmelCase ( self : int ) -> None:
# visited array for storing already visited nodes
__UpperCAmelCase : Union[str, Any] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : int , __lowercase : list ) -> None:
# mark start vertex as visited
__UpperCAmelCase : Optional[Any] = True
print(__lowercase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowercase , __lowercase )
if __name__ == "__main__":
a : Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
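# An iterative depth-first traversal over the same adjacency-dict layout, for
# comparison with the recursive version above (an addition, not part of the
# original class). dfs_iterative(g.vertex, 0) returns [0, 1, 2, 3] here.
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # push neighbours reversed so they pop in insertion order
            stack.extend(reversed(graph.get(node, [])))
    return order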
| 63 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
from __future__ import annotations
a : Optional[Any] = list[list[int]]
# assigning initial values to the grid
a : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase__ ( __lowerCamelCase : Matrix , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase__ ( __lowerCamelCase : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase__ ( __lowerCamelCase : Matrix ):
if location := find_empty_location(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : Any = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = digit
if sudoku(__lowerCamelCase ) is not None:
return grid
__UpperCAmelCase : Union[str, Any] = 0
return None
def lowerCamelCase__ ( __lowerCamelCase : Matrix ):
for row in grid:
for cell in row:
print(__lowerCamelCase , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
a : Optional[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 63 |
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : list[int] , __lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423]
__UpperCAmelCase : str = math.log(len(__lowerCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
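# Alpha-beta pruning variant (an addition for comparison; same complete binary
# tree layout and scores as above, but skips branches that cannot change the
# result):
def minimax_ab(depth, node_index, is_max, scores, height, alpha=-math.inf, beta=math.inf):
    if depth == height:
        return scores[node_index]
    if is_max:
        best = -math.inf
        for child in (node_index * 2, node_index * 2 + 1):
            best = max(best, minimax_ab(depth + 1, child, False, scores, height, alpha, beta))
            alpha = max(alpha, best)
            if beta <= alpha:
                break  # the minimizer already has a better option elsewhere
        return best
    best = math.inf
    for child in (node_index * 2, node_index * 2 + 1):
        best = min(best, minimax_ab(depth + 1, child, True, scores, height, alpha, beta))
        beta = min(beta, best)
        if beta <= alpha:
            break  # the maximizer already has a better option elsewhere
    return best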
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : int = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'openai-gpt'
a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Tuple=40478 , __lowercase : Tuple=512 , __lowercase : int=768 , __lowercase : Dict=12 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=1e-5 , __lowercase : Any=0.02 , __lowercase : List[str]="cls_index" , __lowercase : str=True , __lowercase : Dict=None , __lowercase : str=True , __lowercase : List[str]=0.1 , **__lowercase : List[Any] , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = n_positions
__UpperCAmelCase : Optional[int] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Tuple = afn
__UpperCAmelCase : Any = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : str = layer_norm_epsilon
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = summary_type
__UpperCAmelCase : Optional[Any] = summary_use_proj
__UpperCAmelCase : List[Any] = summary_activation
__UpperCAmelCase : Union[str, Any] = summary_first_dropout
__UpperCAmelCase : Dict = summary_proj_to_labels
super().__init__(**__lowercase )
| 63 | 1 |
from __future__ import annotations
a : str = "Muhammad Umer Farooq"
a : Tuple = "MIT"
a : int = "1.0.0"
a : List[str] = "Muhammad Umer Farooq"
a : Union[str, Any] = "contact@muhammadumerfarooq.me"
a : Tuple = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] , __lowercase : str ) -> None:
super().__init__()
__UpperCAmelCase : list[str] = []
__UpperCAmelCase : List[Any] = domain
def UpperCAmelCase ( self : Tuple , __lowercase : str , __lowercase : list[tuple[str, str | None]] ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__UpperCAmelCase : Union[str, Any] = parse.urljoin(self.domain , __lowercase )
self.urls.append(__lowercase )
def lowerCamelCase__ ( __lowerCamelCase : str ):
return ".".join(get_sub_domain_name(__lowerCamelCase ).split(""".""" )[-2:] )
def lowerCamelCase__ ( __lowerCamelCase : str ):
return parse.urlparse(__lowerCamelCase ).netloc
def lowerCamelCase__ ( __lowerCamelCase : str = "https://github.com" ):
__UpperCAmelCase : Union[str, Any] = get_domain_name(__lowerCamelCase )
# Initialize the parser
__UpperCAmelCase : Tuple = Parser(__lowerCamelCase )
try:
# Open URL
__UpperCAmelCase : Dict = requests.get(__lowerCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__UpperCAmelCase : Tuple = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__UpperCAmelCase : Tuple = requests.get(__lowerCamelCase )
# Get the valid email.
__UpperCAmelCase : Optional[int] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__lowerCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__lowerCamelCase )
if __name__ == "__main__":
a : Any = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 63 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
    a : int = KandinskyV22InpaintPipeline
a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
a : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
a : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : List[Any] = False
@property
def UpperCAmelCase ( self : int ) -> Dict:
return 32
@property
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
return 32
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
return self.time_input_dim
@property
def UpperCAmelCase ( self : str ) -> List[str]:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return 100
@property
def UpperCAmelCase ( self : Dict ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        __UpperCAmelCase : int = UNet2DConditionModel(**__lowercase )
return model
@property
def UpperCAmelCase ( self : int ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : List[str] = self.dummy_unet
__UpperCAmelCase : List[str] = self.dummy_movq
__UpperCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
__UpperCAmelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
__UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uint8(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
        __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.float32 )
__UpperCAmelCase : List[str] = 0
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Optional[Any] = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = """cpu"""
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : str = self.pipeline_class(**__lowercase )
__UpperCAmelCase : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
__UpperCAmelCase : Tuple = output.images
__UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : Optional[Any] = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
__UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.float32 )
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Tuple = """a hat"""
        __UpperCAmelCase : str = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(__lowercase )
        __UpperCAmelCase : Any = KandinskyV22InpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
__UpperCAmelCase : int = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__UpperCAmelCase : Optional[int] = pipeline(
image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
__UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
from collections.abc import Sequence
def lowerCamelCase__ ( __lowerCamelCase : Sequence[float] , __lowerCamelCase : bool = False ):
if not arr:
return 0
__UpperCAmelCase : Optional[int] = 0 if allow_empty_subarrays else float("""-inf""" )
__UpperCAmelCase : Optional[int] = 0.0
for num in arr:
__UpperCAmelCase : str = max(0 if allow_empty_subarrays else num , curr_sum + num )
__UpperCAmelCase : Dict = max(__lowerCamelCase , __lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a : Dict = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 63 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
    __UpperCAmelCase : Optional[Any] = sha256(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
        __UpperCAmelCase : List[str] = sha256(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
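# Example (derived from the hashing above; url_to_filename is the name used at
# the call site further down): the cache filename is sha256(url), plus
# "." + sha256(etag) when an ETag is known, so two revisions of the same URL
# never collide in the cache.
#   url_to_filename("https://example.com/model.h5")           -> "<hex>.h5"
#   url_to_filename("https://example.com/model.h5", etag="x") -> "<hex>.<hex>.h5"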
def lowerCamelCase__ ( url_or_filename : Any , cache_dir : int=None , force_download : int=False , proxies : Union[str, Any]=None , resume_download : Tuple=False , user_agent : str=None , extract_compressed_file : List[Any]=False , force_extract : Tuple=False , local_files_only : str=False , ):
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir , Path ):  # ``Path`` is assumed to be imported from pathlib above
cache_dir = str(cache_dir )
if isinstance(url_or_filename , Path ):
url_or_filename = str(url_or_filename )
if is_remote_url(url_or_filename ):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
elif os.path.exists(url_or_filename ):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
if extract_compressed_file:
if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir , output_file = os.path.split(output_path )
output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + """.lock"""
with FileLock(lock_path ):
shutil.rmtree(output_path_extracted , ignore_errors=True )
os.makedirs(output_path_extracted )
if is_zipfile(output_path ):
with ZipFile(output_path , """r""" ) as zip_file:
zip_file.extractall(output_path_extracted )
zip_file.close()
elif tarfile.is_tarfile(output_path ):
tar_file = tarfile.open(output_path )
tar_file.extractall(output_path_extracted )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( query : List[Any] , delim : int="," ):
assert isinstance(query , str )
if os.path.isfile(query ):
with open(query ) as f:
data = eval(f.read() )  # NOTE: eval of file contents is unsafe; kept to mirror the original helper
else:
req = requests.get(query )
try:
data = req.json()
except Exception:
data = req.content.decode()
assert data is not None, "could not connect"
try:
data = eval(data )  # NOTE: eval of remote content is unsafe; kept to mirror the original helper
except Exception:
data = data.split("""\n""" )
req.close()
return data
def get_image_from_url(url : Any ):
response = requests.get(url )
img = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( url : str ):
fn = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(url )
with open(fn , """rb""" ) as stream:
weights = pkl.load(stream )
model = weights.pop("""model""" )
new = {}
for k, v in model.items():
new[k] = torch.from_numpy(v )
if "running_var" in k:
zero = torch.tensor([0] )
k_new = k.replace("""running_var""" , """num_batches_tracked""" )
new[k_new] = zero
return new
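# Illustration (not in the original file) of the key rewrite above: Detectron-style
# pickles carry BatchNorm "running_var" but not the "num_batches_tracked" buffer that
# PyTorch's BatchNorm expects, so a zero tensor is registered under the derived key.
# The sample key below is hypothetical.
#   "backbone.res2.bn.running_var" -> also yields "backbone.res2.bn.num_batches_tracked"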
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(__lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( im : Any , input_format : List[Any]="RGB" ):
assert isinstance(im , str )
if os.path.isfile(im ):
img = cv2.imread(im )
else:
img = get_image_from_url(im )
assert img is not None, f"""could not connect to: {im}"""
img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
img = img[:, :, ::-1]
return img
def lowerCamelCase__ ( images : Optional[int] , batch : int=1 ):
return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
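# Usage sketch (not part of the original helpers) for the batching generator above;
# the final chunk is simply shorter when len(images) is not a multiple of batch.
images = list(range(5))  # stand-in for a list of image tensors
batches = (images[i : i + 2] for i in range(0, len(images), 2))
print(list(batches))  # [[0, 1], [2, 3], [4]]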
| 63 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
features = {
"""input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
output = model(features )["""last_hidden_state"""]
expected_shape = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , parent : Union[str, Any] , batch_size : Any=13 , seq_length : Optional[int]=7 , is_training : str=True , use_attention_mask : Optional[Any]=True , use_token_type_ids : int=True , use_labels : int=True , vocab_size : List[str]=99 , hidden_size : int=32 , num_hidden_layers : int=5 , num_attention_heads : Tuple=4 , intermediate_size : str=37 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : List[Any]=16 , type_sequence_label_size : Dict=2 , initializer_range : Union[str, Any]=0.02 , num_choices : Dict=4 , ) -> int:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self : Dict ) -> Tuple:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Any ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
all_model_classes : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
self.model_tester = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
| 63 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : List[str] ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
assert hasattr(self , """env""" )
def create_estimator( self : List[str] , instance_count : Optional[Any] ) -> Any:
# configuration for running training on smdistributed Model Parallel
mpi_options = {
"""enabled""": True,
"""processes_per_host""": 8,
}
smp_options = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
def UpperCAmelCase ( self : List[str] , job_name : Optional[int] ) -> str:
TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def UpperCAmelCase ( self : Dict , instance_count : Union[str, Any] ) -> Tuple:
# create estimator
estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class SchedulerType(Enum ):
"""simple docstring"""
LINEAR = 'linear'
COSINE = 'cosine'
COSINE_WITH_RESTARTS = 'cosine_with_restarts'
POLYNOMIAL = 'polynomial'
CONSTANT = 'constant'
CONSTANT_WITH_WARMUP = 'constant_with_warmup'
PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer : Optimizer , last_epoch : int = -1 ):
return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer : Optimizer , num_warmup_steps : int , last_epoch : int = -1 ):
def lr_lambda(current_step : int ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
return 1.0
return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer : Optimizer , step_rules : str , last_epoch : int = -1 ):
rules_dict = {}
rule_list = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
steps_str , value_str = rule_str.split(""":""" )
steps = int(steps_str )
value = float(value_str )
rules_dict[steps] = value
last_lr_multiple = float(rule_list[-1] )
def create_rules_function(rules_dict : Dict , last_lr_multiple : List[Any] ):
def rule_func(steps : int ) -> float:
sorted_steps = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(sorted_steps ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
rules_func = create_rules_function(rules_dict , last_lr_multiple )
return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
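# Usage sketch (not part of the original module): step_rules is parsed above as
# comma-separated "step:multiplier" pairs plus a trailing multiplier for all later
# steps. With "1:1.0,10:0.1,0.01" the multiplier is 1.0 before step 1, 0.1 for steps
# 1 through 9, and 0.01 from step 10 on. The model and optimizer are hypothetical.
import torch
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
scheduler = get_piecewise_constant_schedule(optimizer, step_rules="1:1.0,10:0.1,0.01")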
def get_linear_schedule_with_warmup(optimizer : Optional[Any] , num_warmup_steps : str , num_training_steps : Tuple , last_epoch : List[Any]=-1 ):
def lr_lambda(current_step : int ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : float = 0.5 , last_epoch : int = -1 ):
def lr_lambda(current_step : Dict ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
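# Sanity check (not part of the original module) of the post-warmup multiplier above,
# 0.5 * (1 + cos(pi * 2 * num_cycles * progress)): with the default num_cycles=0.5 it
# decays from 1.0 at progress 0, through 0.5 at the halfway point, to 0.0 at the end.
# ``math`` is already imported at the top of this file.
for progress in (0.0, 0.5, 1.0):
    print(round(0.5 * (1.0 + math.cos(math.pi * 0.5 * 2.0 * progress)), 3))  # 1.0, 0.5, 0.0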
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : int = 1 , last_epoch : int = -1 ):
def lr_lambda(current_step : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer : str , num_warmup_steps : List[str] , num_training_steps : Tuple , lr_end : List[Any]=1E-7 , power : List[Any]=1.0 , last_epoch : int=-1 ):
lr_init = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(current_step : int ):
if current_step < num_warmup_steps:
return float(current_step ) / float(max(1 , num_warmup_steps ) )
elif current_step > num_training_steps:
return lr_end / lr_init  # as LambdaLR multiplies by lr_init
else:
lr_range = lr_init - lr_end
decay_steps = num_training_steps - num_warmup_steps
pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
decay = lr_range * pct_remaining**power + lr_end
return decay / lr_init  # as LambdaLR multiplies by lr_init
return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name : Union[str, SchedulerType] , optimizer : Optimizer , step_rules : Optional[str] = None , num_warmup_steps : Optional[int] = None , num_training_steps : Optional[int] = None , num_cycles : int = 1 , power : float = 1.0 , last_epoch : int = -1 , ):
name = SchedulerType(name )
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer , last_epoch=last_epoch )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
return schedule_func(
optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
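# Usage sketch (not part of the original module; restoring the dispatcher's name as
# get_scheduler is an assumption based on how it is used). The model and optimizer
# below are hypothetical.
import torch
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(3):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # climbs linearly toward 1e-3 during warmup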
| 63 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a : Any = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *args : List[Any] , **kwargs : List[Any] ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
| 63 |
from math import pi, sqrt
def gamma(num : float ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(num ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
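# Worked example (not in the original file): by the recurrence above,
# gamma(2.5) = 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ~ 1.3293.
print(gamma(2.5))  # ~1.3293403881791372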
def test_gamma():
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
num = float(input("Gamma of: "))
print(f"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 63 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class a ( lowercase__ ):
"""simple docstring"""
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
self.test_model = SMALL_MODEL_IDENTIFIER
self.framework_pt = """pt"""
self.framework_tf = """tf"""
def _setup_pt_ckpt( self : Tuple , model_path : List[str] ) -> Union[str, Any]:
model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(model_path )
def _setup_tf_ckpt( self : Any , model_path : Optional[int] ) -> Optional[Any]:
model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
model_tf.save_pretrained(model_path )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__UpperCAmelCase : Optional[int] = """mock_framework"""
# Framework provided - return whatever the user provides
__UpperCAmelCase : int = FeaturesManager.determine_framework(self.test_model , __lowercase )
self.assertEqual(__lowercase , __lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__lowercase )
__UpperCAmelCase : List[str] = FeaturesManager.determine_framework(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__lowercase )
__UpperCAmelCase : List[str] = FeaturesManager.determine_framework(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> int:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(local_pt_ckpt )
framework = FeaturesManager.determine_framework(local_pt_ckpt )
self.assertEqual(framework , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(local_tf_ckpt )
framework = FeaturesManager.determine_framework(local_tf_ckpt )
self.assertEqual(framework , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(FileNotFoundError ):
framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
# TensorFlow not in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=False )
with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
mock_torch_available = MagicMock(return_value=False )
with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_tf )
# Both in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=True )
mock_torch_available = MagicMock(return_value=True )
with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
"""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# Both not in environment -> raise error
mock_tf_available = MagicMock(return_value=False )
mock_torch_available = MagicMock(return_value=False )
with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
"""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
with self.assertRaises(EnvironmentError ):
framework = FeaturesManager.determine_framework(self.test_model )
| 63 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
"""simple docstring"""
a : int
a : Node | None = None
a : Node | None = None
def make_tree():
tree = Node(1 )
tree.left = Node(2 )
tree.right = Node(3 )
tree.left.left = Node(4 )
tree.left.right = Node(5 )
return tree
def preorder(root : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root : Node | None ):
output: list[Any] = []
if root is None:
return output
process_queue = deque([root] )
while process_queue:
node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right(root : Node | None , level : int ):
output: list[Any] = []
def populate_output(root : Node | None , level : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(root , level )
return output
def get_nodes_from_right_to_left(root : Node | None , level : int ):
output: list[Any] = []
def populate_output(root : Node | None , level : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(root , level )
return output
def zigzag(root : Node | None ):
if root is None:
return []
output: list[Sequence[Node | None]] = []
flag = 0
height_tree = height(root )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(root , h ) )
flag = 1
else:
output.append(get_nodes_from_right_to_left(root , h ) )
flag = 0
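# Expected results (not in the original file), assuming the five-node example tree
# sketched in make_tree above (1 at the root, 2 and 3 as children, 4 and 5 under 2):
#   level_order(root) -> [1, 2, 3, 4, 5]
#   zigzag(root)      -> [[1], [3, 2], [4, 5]]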
return output
def main():  # Main function for testing.
root = make_tree()
print(f"""In-order Traversal: {inorder(root )}""" )
print(f"""Pre-order Traversal: {preorder(root )}""" )
print(f"""Post-order Traversal: {postorder(root )}""" , """\n""" )
print(f"""Height of Tree: {height(root )}""" , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(root ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(root ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , parent : Union[str, Any] , batch_size : Any=13 , seq_length : Optional[int]=7 , is_training : str=True , use_attention_mask : Optional[Any]=True , use_token_type_ids : int=True , use_labels : int=True , vocab_size : List[str]=99 , hidden_size : int=32 , num_hidden_layers : int=5 , num_attention_heads : Tuple=4 , intermediate_size : str=37 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : List[Any]=16 , type_sequence_label_size : Dict=2 , initializer_range : Union[str, Any]=0.02 , num_choices : Dict=4 , ) -> int:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self : Dict ) -> Tuple:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Any ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
all_model_classes : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
self.model_tester = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
| 63 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
tokenizer_class : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **kwargs : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self : str , tokenizer : Union[str, Any] ) -> Any:
input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , tokenizer : Optional[int] ) -> List[Any]:
input_text , output_text = self.get_input_output_texts(tokenizer )
ids = tokenizer.encode(output_text , add_special_tokens=False )
text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
tokenizer = self.get_tokenizer()
# Testing tokenization
input_text = """こんにちは、世界。 こんばんは、㔺界。"""
expected_tokens = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
tokens = tokenizer.tokenize(input_text )
self.assertListEqual(tokens , expected_tokens )
# Testing conversion to ids without special tokens
expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
input_ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(input_ids , expected_ids )
# Testing conversion to ids with special tokens
input_tokens = tokens + [tokenizer.unk_token]
expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
self.assertListEqual(input_ids , expected_ids )
def UpperCAmelCase ( self : Tuple ) -> Dict:
tokenizer = self.get_tokenizer()
# Testing tokenization
input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
ids = tokenizer.encode(input_text )
output_text = tokenizer.decode(ids )
self.assertEqual(output_text , expected_text )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
prefix_text = """こんにちは、世界。"""
input_text = """こんばんは、㔺界。😀"""
expected_text = """こんにちは、世界。こんばんは、世界。😀"""
ids_1 = tokenizer.encode(prefix_text + input_text )
ids_2 = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
ids_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
decoded_1 = tokenizer.decode(ids_1 )
decoded_2 = tokenizer.decode(ids_2 )
decoded_3 = tokenizer.decode(ids_3 )
self.assertEqual(decoded_1 , expected_text )
self.assertEqual(decoded_2 , expected_text )
self.assertEqual(decoded_3 , expected_text )
@slow
def UpperCAmelCase ( self : Any ) -> str:
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
prefix_text = """こんにちは、世界。"""
input_text = """こんばんは、㔺界。😀"""
len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
len_text = len(tokenizer.encode(input_text ) ) - 2
expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
type_ids_2 = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
type_ids_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
self.assertListEqual(type_ids_1 , expected_mask_1 )
self.assertListEqual(type_ids_2 , expected_mask_2 )
self.assertListEqual(type_ids_3 , expected_mask_3 )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
x_token_1 = tokenizer.encode("""あンいワ""" )
x_token_2 = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
x_token_3 = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
self.assertNotEqual(x_token_1 , x_token_2 )
self.assertNotEqual(x_token_1 , x_token_3 )
self.assertEqual(x_token_2[1] , x_token_2[-1] )  # SEG token
self.assertEqual(x_token_3[1] , x_token_3[3] )  # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
x_token = tokenizer(input_pairs , padding=True )
x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
# fmt: off
input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , input_ids_expected )
self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
self.assertListEqual(x_token.attention_mask , attention_mask_expected )
self.assertListEqual(x_token_2.input_ids , input_ids_expected )
self.assertListEqual(x_token_2.token_type_ids , token_type_ids_expected )
self.assertListEqual(x_token_2.attention_mask , attention_mask_expected )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
config_class : Tuple = CLIPConfig
_no_split_modules : Optional[Any] = ['CLIPEncoderLayer']
def __init__( self : List[str] , __lowercase : CLIPConfig ) -> List[Any]:
super().__init__(__lowercase )
self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def UpperCAmelCase ( self : Any , clip_input : List[str] , images : Dict , p_threshold : Any=0.5 , w_threshold : Dict=0.5 ) -> List[Any]:
image_embeds = self.vision_model(clip_input )[0]
nsfw_detected = self.p_head(image_embeds )
nsfw_detected = nsfw_detected.flatten()
nsfw_detected = nsfw_detected > p_threshold
nsfw_detected = nsfw_detected.tolist()
if any(nsfw_detected ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(nsfw_detected ):
if nsfw_detected_:
images[idx] = np.zeros(images[idx].shape )
watermark_detected = self.w_head(image_embeds )
watermark_detected = watermark_detected.flatten()
watermark_detected = watermark_detected > w_threshold
watermark_detected = watermark_detected.tolist()
if any(watermark_detected ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(watermark_detected ):
if watermark_detected_:
images[idx] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
deprecated_args : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
positive_arg = deprecated_arg[3:]
setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
logger.warning(
f"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
super().__init__(**__lowercase )
torchscript : bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
fp16_opt_level : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
device = torch.device("""cpu""" )
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
else:
device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
n_gpu = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self : List[str] ) -> int:
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self : int ) -> "torch.device":
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return self.n_gpu > 0
| 63 | 1 |
from itertools import product
def total_frequency_distribution(sides_number : int , dice_number : int ):
max_face_number = sides_number
max_total = max_face_number * dice_number
totals_frequencies = [0] * (max_total + 1)
min_face_number = 1
face_numbers = range(min_face_number , max_face_number + 1 )
for dice_numbers in product(face_numbers , repeat=dice_number ):
total = sum(dice_numbers )
totals_frequencies[total] += 1
return totals_frequencies
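# Worked example (not in the original file): two 2-sided dice. Totals 2, 3, 4 occur
# with frequencies 1, 2, 1, and indices 0 and 1 stay at zero.
print(total_frequency_distribution(sides_number=2, dice_number=2))  # [0, 0, 1, 2, 1]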
def solution():
peter_totals_frequencies = total_frequency_distribution(
sides_number=4 , dice_number=9 )
colin_totals_frequencies = total_frequency_distribution(
sides_number=6 , dice_number=6 )
peter_wins_count = 0
min_peter_total = 9
max_peter_total = 4 * 9
min_colin_total = 6
for peter_total in range(min_peter_total , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
total_games_number = (4**9) * (6**6)
peter_win_probability = peter_wins_count / total_games_number
rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset : Tuple , expected_features : Dict ):
assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( keep_in_memory : Optional[int] , sqlite_path : List[Any] , tmp_path : Optional[Any] , __lowerCamelCase : Dict ):
cache_dir = tmp_path / """cache"""
expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( features : int , sqlite_path : str , tmp_path : List[str] , __lowerCamelCase : Any ):
cache_dir = tmp_path / """cache"""
default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=features , cache_dir=cache_dir ).read()
_check_sql_dataset(dataset , expected_features )
def iter_sql_file(sqlite_path : Optional[int] ):
with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
cur = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path : Optional[Any] , tmp_path : Tuple , __lowerCamelCase : int ):
cache_dir = tmp_path / """cache"""
output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
original_sql = iter_sql_file(sqlite_path )
expected_sql = iter_sql_file(output_sqlite_path )
for row1, row2 in zip(original_sql , expected_sql ):
assert row1 == row2
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path : Any , tmp_path : List[str] , __lowerCamelCase : List[Any] ):
cache_dir = tmp_path / """cache"""
output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
original_sql = iter_sql_file(sqlite_path )
expected_sql = iter_sql_file(output_sqlite_path )
for row1, row2 in zip(original_sql , expected_sql ):
assert row1 == row2
@require_sqlalchemy
def lowerCamelCase__ ( sqlite_path : Optional[int] , tmp_path : List[Any] , __lowerCamelCase : Optional[int] ):
cache_dir = tmp_path / """cache"""
output_sqlite_path = os.path.join(cache_dir , """tmp.sql""" )
dataset = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=cache_dir ).read()
with pytest.raises(ValueError ):
SqlDatasetWriter(dataset , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowercase : str = "cpu" , __lowercase : str = "openai/clip-vit-large-patch14" ) -> None:
__UpperCAmelCase : List[str] = device
__UpperCAmelCase : Any = CLIPTokenizerFast.from_pretrained(__lowercase )
__UpperCAmelCase : Optional[int] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
__UpperCAmelCase : List[str] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
__UpperCAmelCase : Dict = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__UpperCAmelCase : Tuple = torchvision.transforms.Resize(224 )
__UpperCAmelCase : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def UpperCAmelCase ( self : int , __lowercase : Dict ) -> Dict:
__UpperCAmelCase : Tuple = self.resize(__lowercase )
__UpperCAmelCase : Union[str, Any] = self.center_crop(__lowercase )
__UpperCAmelCase : Any = self.normalize(__lowercase )
return images
def __call__( self : List[Any] , __lowercase : Any=None , __lowercase : str=None , **__lowercase : int ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = self.tokenizer(text=__lowercase , **__lowercase )
__UpperCAmelCase : str = self.preprocess_img(__lowercase )
__UpperCAmelCase : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , iterations : Optional[int]=10 , lr : Optional[int]=0.01 , vqgan : str=None , vqgan_config : str=None , vqgan_checkpoint : List[Any]=None , clip : Dict=None , clip_preprocessor : Any=None , device : List[str]=None , log : List[str]=False , save_vector : Dict=True , return_val : Optional[Any]="image" , quantize : Optional[Any]=True , save_intermediate : Union[str, Any]=False , show_intermediate : Any=False , make_grid : Union[str, Any]=False , ) -> None:
super().__init__()
self.latent = None
self.device = device if device else get_device()
if vqgan:
self.vqgan = vqgan
else:
self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
self.vqgan.eval()
if clip:
self.clip = clip
else:
self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
self.iterations = iterations
self.lr = lr
self.log = log
self.make_grid = make_grid
self.return_val = return_val
self.quantize = quantize
self.latent_dim = self.vqgan.decoder.z_shape
def UpperCAmelCase ( self : List[str] , __lowercase : Any=None , __lowercase : Any=None , __lowercase : Union[str, Any]=5 , __lowercase : str=True ) -> List[str]:
__UpperCAmelCase : Dict = []
if output_path is None:
__UpperCAmelCase : Dict = """./animation.gif"""
if input_path is None:
__UpperCAmelCase : str = self.save_path
__UpperCAmelCase : Any = sorted(glob(input_path + """/*""" ) )
if not len(__lowercase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__lowercase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
__UpperCAmelCase : Tuple = total_duration / len(__lowercase )
__UpperCAmelCase : Union[str, Any] = [frame_duration] * len(__lowercase )
if extend_frames:
__UpperCAmelCase : Any = 1.5
__UpperCAmelCase : Tuple = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__lowercase ) )
imageio.mimsave(__lowercase , __lowercase , duration=__lowercase )
print(f"""gif saved to {output_path}""" )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Dict=None , __lowercase : List[Any]=None ) -> List[Any]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
__UpperCAmelCase : Optional[Any] = preprocess(Image.open(__lowercase ) , target_image_size=256 ).to(self.device )
__UpperCAmelCase : str = preprocess_vqgan(__lowercase )
__UpperCAmelCase , *__UpperCAmelCase : List[Any] = self.vqgan.encode(__lowercase )
return z
def UpperCAmelCase ( self : str , __lowercase : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.latent.detach().requires_grad_()
__UpperCAmelCase : Any = base_latent + transform_vector
if self.quantize:
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = self.vqgan.quantize(__lowercase )
else:
__UpperCAmelCase : Tuple = trans_latent
return self.vqgan.decode(__lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : str , __lowercase : Dict , __lowercase : List[str]=None ) -> Union[str, Any]:
__UpperCAmelCase : int = self.clip_preprocessor(text=__lowercase , images=__lowercase , return_tensors="""pt""" , padding=__lowercase )
__UpperCAmelCase : int = self.clip(**__lowercase )
__UpperCAmelCase : List[Any] = clip_outputs.logits_per_image
if weights is not None:
__UpperCAmelCase : Union[str, Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase ( self : Any , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , __lowercase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
__UpperCAmelCase : Union[str, Any] = self._get_clip_similarity(neg_prompts["""prompts"""] , __lowercase , weights=neg_prompts["""weights"""] )
else:
__UpperCAmelCase : str = torch.tensor([1] , device=self.device )
__UpperCAmelCase : List[str] = -torch.log(__lowercase ) + torch.log(__lowercase )
return loss
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> List[str]:
__UpperCAmelCase : Optional[int] = torch.randn_like(self.latent , requires_grad=__lowercase , device=self.device )
__UpperCAmelCase : Tuple = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__UpperCAmelCase : Optional[Any] = self._add_vector(__lowercase )
__UpperCAmelCase : List[Any] = loop_post_process(__lowercase )
__UpperCAmelCase : List[Any] = self._get_CLIP_loss(__lowercase , __lowercase , __lowercase )
print("""CLIP loss""" , __lowercase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase ( self : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Optional[int] ) -> Optional[int]:
wandb.init(reinit=__lowercase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
__UpperCAmelCase : Optional[int] = Image.open(__lowercase )
__UpperCAmelCase : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(__lowercase ) )
    def process_prompts( self , prompts ):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(""":""" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
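    # Illustration (added): with the parsing above, the string
    # "a red house:2|a photo" becomes prompts ["a red house", "a photo"]
    # with weights tensor([2.0, 1.0]); bare prompts default to weight 1.0.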
def UpperCAmelCase ( self : Optional[int] , __lowercase : Tuple , __lowercase : Dict=None , __lowercase : Optional[int]=None , __lowercase : List[str]=True , __lowercase : Dict=False , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , ) -> Optional[int]:
if image_path:
__UpperCAmelCase : Optional[Any] = self._get_latent(__lowercase )
else:
__UpperCAmelCase : int = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__lowercase , __lowercase , __lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
__UpperCAmelCase : Dict = self.process_prompts(__lowercase )
__UpperCAmelCase : Optional[Any] = self.process_prompts(__lowercase )
if save_final and save_path is None:
__UpperCAmelCase : Optional[Any] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__lowercase ):
os.makedirs(__lowercase )
else:
__UpperCAmelCase : Dict = save_path + """_""" + get_timestamp()
os.makedirs(__lowercase )
__UpperCAmelCase : List[str] = save_path
__UpperCAmelCase : Optional[int] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__lowercase ) )
__UpperCAmelCase : Dict = loop_post_process(__lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__lowercase , __lowercase , __lowercase ) ):
if show_intermediate:
show_pil(__lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__lowercase )} )
if show_final:
show_pil(__lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 63 |
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime ( n : int ):
    return seive[n]
def contains_an_even_digit ( number : int ):
    return any(digit in """02468""" for digit in str(number ) )
def find_circular_primes ( limit : int = 1000000 ):
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution ( ):
    return len(find_circular_primes() )
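# Worked example (added): 197 is a circular prime because every digit rotation
# is itself prime (197, 971, 719); 23 is not, since its rotation 32 is
# composite. The rotations below are exactly what the list comprehension in
# find_circular_primes builds.
assert [int("197"[j:] + "197"[:j]) for j in range(3)] == [197, 971, 719]
assert all(is_prime(p) for p in (197, 971, 719))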
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 63 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
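# Illustration (added; hypothetical checkpoint key): a fairseq name such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj"
# entry in MAPPING above; the "*" in the mapped value is then filled with the
# layer index, giving "encoder.layers.3.attention.k_proj.weight" on the HF side.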
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
for attribute in key.split(""".""" ):
__UpperCAmelCase : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
__UpperCAmelCase : Any = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
__UpperCAmelCase : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__UpperCAmelCase : int = value
elif weight_type == "bias":
__UpperCAmelCase : List[str] = value
else:
__UpperCAmelCase : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Tuple = fairseq_model.state_dict()
__UpperCAmelCase : str = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : str = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__UpperCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCAmelCase : Dict = True
if "*" in mapped_key:
__UpperCAmelCase : Any = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
__UpperCAmelCase : int = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
__UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase : str = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
__UpperCAmelCase : List[str] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : Tuple = """weight"""
else:
__UpperCAmelCase : List[str] = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : int = name.split(""".""" )
__UpperCAmelCase : Optional[int] = int(items[0] )
__UpperCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCAmelCase : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCAmelCase : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCAmelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCAmelCase : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=None ):
# load the pre-trained checkpoints
__UpperCAmelCase : str = torch.load(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = WavLMConfigOrig(checkpoint["""cfg"""] )
__UpperCAmelCase : Any = WavLMOrig(__lowerCamelCase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
__UpperCAmelCase : Dict = WavLMConfig.from_pretrained(__lowerCamelCase )
else:
__UpperCAmelCase : Tuple = WavLMConfig()
__UpperCAmelCase : str = WavLMModel(__lowerCamelCase )
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavlm.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 63 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key ( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
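# Illustration (added; hypothetical key): PATTERNS rewrites "attention" to
# "attn" and "q_lin" to "q_proj", then the encoder branch turns ".attn" into
# ".self_attn".
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"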
def rename_layernorm_keys ( sd ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera :
    """simple docstring"""

    origin : torch.Tensor  # [batch_size x 3]
    x : torch.Tensor  # [batch_size x 3]
    y : torch.Tensor  # [batch_size x 3]
    z : torch.Tensor  # [batch_size x 3]
    width : int
    height : int
    x_fov : float
    y_fov : float
    shape : Tuple[int]
    def __post_init__( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
    def fov( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
    def get_image_coords( self ) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
    def camera_rays( self ):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays( self , coords : torch.Tensor ) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
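    # Worked example (added): fracs maps pixel indices to [-1, 1], so with
    # x_fov = y_fov = 0.7 rad the corner pixel (0, 0) gets the (unnormalized)
    # direction z - tan(0.35) * x - tan(0.35) * y, i.e. one corner of the
    # viewing frustum.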
    def resize_image( self , width : int , height : int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras ( size : int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
| 63 |
def interpolation_search ( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion ( sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted ( collection ):
    if collection != sorted(collection ):
        raise ValueError("""Collection must be ascending sorted""" )
    return True
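# Worked example (added): probing [10, 30, 40, 45, 50, 66, 77, 93] for 66,
# the first interpolation point is 0 + (66 - 10) * 7 // (93 - 10) = 4, where
# the value 50 is too small, so the search continues with left = 5 and finds
# 66 at index 5 on the next probe.
assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66) == 5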
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 63 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    """simple docstring"""

    def __init__( self ) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("""bits""" , self.watermark )
    def apply_watermark( self , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , """dwtDct""" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
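# Hedged usage sketch (added; names are illustrative, assumes `imwatermark`
# and its cv2 dependency are installed):
#     watermarker = StableDiffusionXLWatermarker()
#     images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#     watermarked = watermarker.apply_watermark(images)  # same shape, [-1, 1]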
| 63 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
    order = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
__UpperCAmelCase : int = num_inference_steps
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(__lowercase )
__UpperCAmelCase : Optional[Any] = []
def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : Optional[Any] = timestep_index + 1
__UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowercase )
if len(self.ets ) == 1:
__UpperCAmelCase : Tuple = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
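    # Note (added): the ets blends above are the classic Adams-Bashforth
    # multistep coefficients; the 4-step rule combines the last four
    # evaluations as (55*f_n - 59*f_{n-1} + 37*f_{n-2} - 9*f_{n-3}) / 24,
    # with the 1-, 2- and 3-step variants used while the history fills up.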
def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = self.alphas[timestep_index]
__UpperCAmelCase : Tuple = self.betas[timestep_index]
__UpperCAmelCase : Any = self.alphas[prev_timestep_index]
__UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
__UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
__UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
| 63 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = 0
def UpperCAmelCase ( self : Optional[int] ) -> Any:
__UpperCAmelCase : str = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__lowercase , __lowercase )
def UpperCAmelCase ( self : Any ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : List[Any] = Path(__lowercase ) / """preprocessor_config.json"""
__UpperCAmelCase : Dict = Path(__lowercase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def UpperCAmelCase ( self : Any ) -> Any:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Any = Path(__lowercase ) / """preprocessor_config.json"""
__UpperCAmelCase : Tuple = Path(__lowercase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
__UpperCAmelCase : str = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Any = CLIPConfig()
            # Create a dummy config file with image_processor_type
__UpperCAmelCase : Union[str, Any] = Path(__lowercase ) / """preprocessor_config.json"""
__UpperCAmelCase : Optional[Any] = Path(__lowercase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(__lowercase ).to_dict()
config_dict.pop("""image_processor_type""" )
__UpperCAmelCase : List[Any] = CLIPImageProcessor(**__lowercase )
# save in new folder
model_config.save_pretrained(__lowercase )
config.save_pretrained(__lowercase )
__UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(__lowercase )
# make sure private variable is not incorrectly saved
__UpperCAmelCase : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__lowercase , __lowercase )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[int] = Path(__lowercase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
with self.assertRaisesRegex(
__lowercase , """clip-base is not a local folder and is not a valid model identifier""" ):
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
with self.assertRaisesRegex(
__lowercase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(__lowercase , revision="""aaaaaa""" )
def UpperCAmelCase ( self : Dict ) -> Any:
with self.assertRaisesRegex(
__lowercase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__UpperCAmelCase : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
__UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
__UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
try:
AutoConfig.register("""custom""" , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoImageProcessor.register(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : int = Path(__lowercase ) / """preprocessor_config.json"""
__UpperCAmelCase : Any = Path(__lowercase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__lowercase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__lowercase , """w""" ) )
__UpperCAmelCase : List[Any] = CustomImageProcessor.from_pretrained(__lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self : Optional[Any] ) -> str:
class a ( lowercase__ ):
"""simple docstring"""
a : Any = True
try:
AutoConfig.register("""custom""" , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__lowercase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 63 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 63 | 1 |
def present_value ( discount_rate : float , cash_flows : list[float] ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
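# Worked example (added): at a 10% discount rate, cash flows [-100, 50, 60]
# give -100 + 50/1.1 + 60/1.1**2 = -100 + 45.45 + 49.59 = -4.96.
assert present_value(0.10, [-100.0, 50.0, 60.0]) == -4.96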
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
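# Note (added): _LazyModule defers the actual imports until first attribute
# access, so e.g. `from transformers import XGLMTokenizer` never triggers the
# torch/TF/Flax branches above unless those classes are actually touched.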
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 1 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node :
    """simple docstring"""

    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : float , parent : Node | None , ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic( self ) -> float:
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__( self , other ) -> bool:
        return self.f_cost < other.f_cost
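# Worked example (added): for a node at (x=2, y=3) aiming for the goal at
# (x=6, y=6), the Manhattan heuristic used above is |2 - 6| + |3 - 6| = 7.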
class GreedyBestFirst :
    """simple docstring"""

    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ) -> None:
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes : list[Node] = []
        self.reached = False
    def search( self ) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent : Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node : Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
a : Union[str, Any] = (0, 0)
a : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
a : Union[str, Any] = GreedyBestFirst(init, goal)
a : Optional[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
a : Union[str, Any] = 2
for elem in grid:
print(elem)
| 63 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
class TrieNode :
    """simple docstring"""

    def __init__( self ) -> None:
        self.nodes : dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words : list[str] ) -> None:
        for word in words:
            self.insert(word )
    def insert( self , word : str ) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word : str ) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word : str ) -> None:
        def _delete(curr : TrieNode , word : str , index : int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
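# Minimal usage sketch (added; names are illustrative): build a trie and query
# a stored word versus a bare prefix -- only complete inserted words are found.
_demo_root = TrieNode()
_demo_root.insert_many(["car", "cart"])
assert _demo_root.find("car") and not _demo_root.find("ca")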
def print_words ( node : TrieNode , word : str ):
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie ( ):
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : bool ):
print(str(__lowerCamelCase ) , """works!""" if passes else """doesn't work :(""" )
def lowerCamelCase__ ( ):
assert test_trie()
def lowerCamelCase__ ( ):
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 63 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__UpperCAmelCase : int = [True] * (num + 1)
__UpperCAmelCase : Tuple = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCamelCase ):
__UpperCAmelCase : str = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
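For readability next to the obfuscated identifiers, here is a plain-name version of the sieve in the style-context cell, with the 0/1 edge cases spelled out:

def sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: cross off multiples of each prime up to sqrt(limit)."""
    if limit <= 0:
        raise ValueError("Input must be a positive integer")
    is_prime = [True] * (limit + 1)
    for i in range(min(2, limit + 1)):
        is_prime[i] = False  # 0 and 1 are not prime
    p = 2
    while p * p <= limit:
        if is_prime[p]:
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
        p += 1
    return [n for n in range(2, limit + 1) if is_prime[n]]

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]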
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = ['image_processor', 'tokenizer']
a : List[str] = 'OwlViTImageProcessor'
a : str = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : str , __lowercase : Dict=None , __lowercase : int=None , **__lowercase : Any ) -> Tuple:
__UpperCAmelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowercase , )
__UpperCAmelCase : List[str] = kwargs.pop("""feature_extractor""" )
__UpperCAmelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowercase , __lowercase )
def __call__( self : int , __lowercase : Optional[int]=None , __lowercase : int=None , __lowercase : Any=None , __lowercase : Tuple="max_length" , __lowercase : Any="np" , **__lowercase : Optional[int] ) -> Union[str, Any]:
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__lowercase , __lowercase ) or (isinstance(__lowercase , __lowercase ) and not isinstance(text[0] , __lowercase )):
__UpperCAmelCase : Union[str, Any] = [self.tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase , **__lowercase )]
elif isinstance(__lowercase , __lowercase ) and isinstance(text[0] , __lowercase ):
__UpperCAmelCase : Optional[int] = []
# Maximum number of queries across batch
__UpperCAmelCase : str = max([len(__lowercase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowercase ) != max_num_queries:
__UpperCAmelCase : Tuple = t + [""" """] * (max_num_queries - len(__lowercase ))
__UpperCAmelCase : Optional[int] = self.tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase , **__lowercase )
encodings.append(__lowercase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
__UpperCAmelCase : List[str] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : List[str] = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCAmelCase : Optional[Any] = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : str = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCAmelCase : Tuple = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
__UpperCAmelCase : Tuple = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCAmelCase : Optional[Any] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__UpperCAmelCase : Union[str, Any] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
__UpperCAmelCase : List[str] = BatchEncoding()
__UpperCAmelCase : List[Any] = input_ids
__UpperCAmelCase : List[str] = attention_mask
if query_images is not None:
__UpperCAmelCase : Dict = BatchEncoding()
__UpperCAmelCase : Dict = self.image_processor(
__lowercase , return_tensors=__lowercase , **__lowercase ).pixel_values
__UpperCAmelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCAmelCase : Tuple = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None and images is not None:
__UpperCAmelCase : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCAmelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def UpperCAmelCase ( self : Any , *__lowercase : str , **__lowercase : Optional[int] ) -> int:
return self.image_processor.post_process(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Tuple , *__lowercase : Optional[Any] , **__lowercase : Optional[Any] ) -> Dict:
return self.image_processor.post_process_object_detection(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Union[str, Any] , *__lowercase : int , **__lowercase : Optional[Any] ) -> List[str]:
return self.image_processor.post_process_image_guided_detection(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , *__lowercase : int , **__lowercase : List[str] ) -> Dict:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : List[str] , *__lowercase : Dict , **__lowercase : Union[str, Any] ) -> Tuple:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def UpperCAmelCase ( self : List[str] ) -> List[str]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowercase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowercase , )
return self.image_processor
| 63 |
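One easily missed step in the processor cell above: nested text queries are padded to the batch-wide maximum count (with a single-space string) before tokenization. In isolation that step is just:

def pad_queries(batch: list[list[str]], pad: str = " ") -> list[list[str]]:
    """Pad each sample's query list to the longest list in the batch."""
    max_queries = max(len(queries) for queries in batch)
    return [queries + [pad] * (max_queries - len(queries)) for queries in batch]

print(pad_queries([["a cat", "a dog"], ["a hat"]]))
# [['a cat', 'a dog'], ['a hat', ' ']]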
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 63 | 1 |
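The config cell's `to_dict` follows a recurring pattern: deep-copy `__dict__`, replace the nested sub-config object with its own dict, and record `model_type`. A minimal sketch with illustrative class names:

import copy

class VisionConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, vision_config=None):
        self.vision_config = vision_config or VisionConfig()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()  # recurse into the sub-config
        output["model_type"] = type(self).model_type
        return output

print(CompositeConfig().to_dict())
# {'vision_config': {'hidden_size': 768}, 'model_type': 'composite'}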
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Optional[Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
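Both lazy-module cells also lean on `typing.TYPE_CHECKING`: type checkers follow the guarded imports, while the interpreter never executes them. A minimal sketch (`decimal` here is just a stand-in for a heavy optional dependency):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only static analyzers evaluate this branch; at runtime it is skipped,
    # so optional dependencies stay unimported until actually used.
    from decimal import Decimal

def double(value: "Decimal") -> "Decimal":
    # String annotations mean Decimal need not be importable at definition time.
    return value + value

import decimal
print(double(decimal.Decimal("1.5")))  # 3.0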
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 1 |
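The slow/fast tokenizer comparisons above reduce to a parity-test pattern: feed the same input to two implementations and assert identical output. Stripped of transformers, with two toy tokenizers:

import re

def tokenize_slow(text: str) -> list[str]:
    return text.lower().split()

def tokenize_fast(text: str) -> list[str]:
    # A second implementation that must agree with the first on every tested input.
    return re.findall(r"\S+", text.lower())

sample = "I was born in 92000, and this is falsé."
assert tokenize_slow(sample) == tokenize_fast(sample)
print(tokenize_slow(sample))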
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a : List[str] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowercase__ )
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'rag'
a : Dict = True
def __init__( self : List[str] , __lowercase : Union[str, Any]=None , __lowercase : Any=True , __lowercase : str=None , __lowercase : List[Any]=None , __lowercase : Optional[Any]=None , __lowercase : Optional[int]=None , __lowercase : str=None , __lowercase : Optional[Any]=" / " , __lowercase : Union[str, Any]=" // " , __lowercase : Any=5 , __lowercase : str=300 , __lowercase : Any=768 , __lowercase : List[str]=8 , __lowercase : Union[str, Any]="wiki_dpr" , __lowercase : Any="train" , __lowercase : int="compressed" , __lowercase : List[str]=None , __lowercase : Optional[int]=None , __lowercase : Any=False , __lowercase : List[Any]=False , __lowercase : str=0.0 , __lowercase : Optional[Any]=True , __lowercase : int=False , __lowercase : List[Any]=False , __lowercase : Union[str, Any]=False , __lowercase : str=True , __lowercase : List[str]=None , **__lowercase : Optional[Any] , ) -> Dict:
super().__init__(
bos_token_id=__lowercase , pad_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , is_encoder_decoder=__lowercase , prefix=__lowercase , vocab_size=__lowercase , **__lowercase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCAmelCase : Dict = kwargs.pop("""question_encoder""" )
__UpperCAmelCase : List[Any] = question_encoder_config.pop("""model_type""" )
__UpperCAmelCase : int = kwargs.pop("""generator""" )
__UpperCAmelCase : Union[str, Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__UpperCAmelCase : Optional[Any] = AutoConfig.for_model(__lowercase , **__lowercase )
__UpperCAmelCase : Tuple = AutoConfig.for_model(__lowercase , **__lowercase )
__UpperCAmelCase : Tuple = reduce_loss
__UpperCAmelCase : Union[str, Any] = label_smoothing
__UpperCAmelCase : str = exclude_bos_score
__UpperCAmelCase : Tuple = do_marginalize
__UpperCAmelCase : List[str] = title_sep
__UpperCAmelCase : Any = doc_sep
__UpperCAmelCase : List[Any] = n_docs
__UpperCAmelCase : int = max_combined_length
__UpperCAmelCase : Optional[int] = dataset
__UpperCAmelCase : List[Any] = dataset_split
__UpperCAmelCase : List[str] = index_name
__UpperCAmelCase : Optional[int] = retrieval_vector_size
__UpperCAmelCase : str = retrieval_batch_size
__UpperCAmelCase : Optional[int] = passages_path
__UpperCAmelCase : Optional[int] = index_path
__UpperCAmelCase : Optional[Any] = use_dummy_dataset
__UpperCAmelCase : Optional[Any] = output_retrieved
__UpperCAmelCase : Optional[Any] = do_deduplication
__UpperCAmelCase : int = use_cache
if self.forced_eos_token_id is None:
__UpperCAmelCase : Any = getattr(self.generator , """forced_eos_token_id""" , __lowercase )
@classmethod
def UpperCAmelCase ( cls : Tuple , __lowercase : PretrainedConfig , __lowercase : PretrainedConfig , **__lowercase : Optional[Any] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__lowercase )
def UpperCAmelCase ( self : int ) -> int:
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Optional[Any] = self.question_encoder.to_dict()
__UpperCAmelCase : Dict = self.generator.to_dict()
__UpperCAmelCase : Optional[int] = self.__class__.model_type
return output
| 63 |
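`AutoConfig.for_model` in the cell above dispatches on a `model_type` string; under the hood that is a registry mapping strings to config classes. An illustrative, transformers-free sketch:

class BaseConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

class EncoderConfig(BaseConfig):
    model_type = "encoder"

class DecoderConfig(BaseConfig):
    model_type = "decoder"

REGISTRY = {cls.model_type: cls for cls in (EncoderConfig, DecoderConfig)}

def for_model(model_type: str, **kwargs) -> BaseConfig:
    return REGISTRY[model_type](**kwargs)

cfg = for_model("encoder", hidden_size=256)
print(type(cfg).__name__, cfg.hidden_size)  # EncoderConfig 256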
from __future__ import annotations
import math
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool , __lowerCamelCase : list[int] , __lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , )
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423]
__UpperCAmelCase : str = math.log(len(__lowerCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 1 |
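A plain-name version of the minimax cell above; note the players alternate via `not is_max`, and for the sample scores the maximizer's optimal value is 65:

import math

def minimax(depth: int, index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth == height:  # leaf: return the static evaluation
        return scores[index]
    left = minimax(depth + 1, index * 2, not is_max, scores, height)
    right = minimax(depth + 1, index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
print(minimax(0, 0, True, scores, math.log2(len(scores))))  # 65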
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
a : List[str] = logging.get_logger(__name__)
a : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a : Any = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
a : List[str] = {
"yjernite/retribert-base-uncased": 512,
}
a : int = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class a ( lowercase__ ):
"""simple docstring"""
a : int = VOCAB_FILES_NAMES
a : List[str] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] = PRETRAINED_INIT_CONFIGURATION
a : Dict = RetriBertTokenizer
a : Any = ['input_ids', 'attention_mask']
def __init__( self : Dict , __lowercase : Optional[Any]=None , __lowercase : Any=None , __lowercase : Optional[int]=True , __lowercase : Tuple="[UNK]" , __lowercase : List[str]="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : str="[CLS]" , __lowercase : List[Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : Optional[Any]=None , **__lowercase : List[str] , ) -> Optional[int]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
__UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase : List[str] = getattr(__lowercase , normalizer_state.pop("""type""" ) )
__UpperCAmelCase : str = do_lower_case
__UpperCAmelCase : Optional[int] = strip_accents
__UpperCAmelCase : Optional[int] = tokenize_chinese_chars
__UpperCAmelCase : List[str] = normalizer_class(**__lowercase )
__UpperCAmelCase : str = do_lower_case
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : Any , __lowercase : Optional[int]=None ) -> Union[str, Any]:
__UpperCAmelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self : int , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : str = [self.sep_token_id]
__UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Any = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
| 63 |
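The token-type-id method above encodes sentence pairs as 0s over `[CLS] A [SEP]` and 1s over `B [SEP]`. The layout in isolation (the ids below are arbitrary placeholders):

from typing import Optional

def token_type_ids(first: list[int], second: Optional[list[int]] = None) -> list[int]:
    ids = [0] * (len(first) + 2)        # [CLS] + sentence A + [SEP]
    if second is not None:
        ids += [1] * (len(second) + 1)  # sentence B + [SEP]
    return ids

print(token_type_ids([5, 6, 7], [8, 9]))  # [0, 0, 0, 0, 0, 1, 1, 1]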
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'openai-gpt'
a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Tuple=40478 , __lowercase : Tuple=512 , __lowercase : int=768 , __lowercase : Dict=12 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=1e-5 , __lowercase : Any=0.02 , __lowercase : List[str]="cls_index" , __lowercase : str=True , __lowercase : Dict=None , __lowercase : str=True , __lowercase : List[str]=0.1 , **__lowercase : List[Any] , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = n_positions
__UpperCAmelCase : Optional[int] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Tuple = afn
__UpperCAmelCase : Any = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : str = layer_norm_epsilon
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = summary_type
__UpperCAmelCase : Optional[Any] = summary_use_proj
__UpperCAmelCase : List[Any] = summary_activation
__UpperCAmelCase : Union[str, Any] = summary_first_dropout
__UpperCAmelCase : Dict = summary_proj_to_labels
super().__init__(**__lowercase )
| 63 | 1 |
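The `attribute_map` in the GPT config above lets canonical names such as `hidden_size` alias model-specific ones such as `n_embd`. A read-only sketch of the mechanism (the real transformers class intercepts writes too):

class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Reached only when normal lookup fails, i.e. for canonical aliases.
        target = type(self).attribute_map.get(name)
        if target is None:
            raise AttributeError(name)
        return getattr(self, target)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.n_embd)  # 768 768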
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
a : Optional[Any] = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
a : Tuple = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
a : List[str] = soup.find("meta", {"property": "og:image"})["content"]
a : Optional[Any] = requests.get(image_url).content
a : Dict = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 63 |
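The scraper cell reads the image URL from the page's `og:image` meta tag. The extraction step, shown offline against an inline HTML string (assumes `beautifulsoup4` is installed under its real import name, `bs4`):

from bs4 import BeautifulSoup

html = '<html><head><meta property="og:image" content="https://example.com/cat.jpg"></head></html>'
soup = BeautifulSoup(html, "html.parser")
image_url = soup.find("meta", {"property": "og:image"})["content"]
print(image_url)  # https://example.com/cat.jpg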
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : int = KandinskyVaaInpaintPipeline
a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
a : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
a : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : List[Any] = False
@property
def UpperCAmelCase ( self : int ) -> Dict:
return 32
@property
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
return 32
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
return self.time_input_dim
@property
def UpperCAmelCase ( self : str ) -> List[str]:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return 100
@property
def UpperCAmelCase ( self : Dict ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__UpperCAmelCase : int = UNetaDConditionModel(**__lowercase )
return model
@property
def UpperCAmelCase ( self : int ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : List[str] = self.dummy_unet
__UpperCAmelCase : List[str] = self.dummy_movq
__UpperCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , )
__UpperCAmelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]:
__UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
__UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
__UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
__UpperCAmelCase : List[str] = 0
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : List[str] = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Optional[Any] = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = """cpu"""
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : str = self.pipeline_class(**__lowercase )
__UpperCAmelCase : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
__UpperCAmelCase : Tuple = output.images
__UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : Optional[Any] = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
__UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa )
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Tuple = """a hat"""
__UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
__UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
__UpperCAmelCase : int = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__UpperCAmelCase : Optional[int] = pipeline(
image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
__UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 63 | 1 |
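The pipeline test above compares a small output slice to hard-coded values within an absolute tolerance instead of demanding exact equality. That check in isolation (the expected values below are made up):

import numpy as np

def assert_close(actual: np.ndarray, expected: np.ndarray, tol: float = 1e-2) -> None:
    max_diff = np.abs(actual.flatten() - expected.flatten()).max()
    assert max_diff < tol, f"max abs diff {max_diff:.4f} exceeds {tol}"

expected = np.array([0.5078, 0.4953, 0.4882])
assert_close(expected + 1e-3, expected)  # within tolerance: passes silently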
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase : str = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__UpperCAmelCase : Any = features.copy() if features else default_expected_features
__UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
with contextlib.closing(sqlitea.connect(__lowerCamelCase ) ) as con:
__UpperCAmelCase : Dict = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
__UpperCAmelCase : Optional[int] = tmp_path / """cache"""
__UpperCAmelCase : str = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__UpperCAmelCase : Optional[int] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Dict = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : int = tmp_path / """cache"""
__UpperCAmelCase : int = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Any = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Union[str, Any] = tmp_path / """cache"""
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , """tmp.sql""" )
__UpperCAmelCase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 63 |
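The SQL tests above verify a write-then-read round trip. The core of that idea with the stdlib `sqlite3` (table and column names are illustrative):

import sqlite3

rows = [("a", 1, 1.0), ("b", 2, 2.0)]
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
assert list(con.execute("SELECT * FROM dataset")) == rows  # round trip preserved
con.close()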
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.Tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
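# Debugging helper: it always halts — the assert reports an element-wise mismatch,
# and if the tensors do match, the raise below stops execution with a success message.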
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" % (__lowerCamelCase , temp_file.name) )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
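# Usage sketch (not part of the original module; in the upstream library this
# helper is called get_from_cache -- name assumed). URL and cache_dir are
# hypothetical placeholders:
#
#   path = get_from_cache(
#       "https://example.com/weights.bin", cache_dir="/tmp/hf_cache", resume_download=True
#   )
#   # `path` is the cached copy; a sidecar `<path>.json` records the url and etag.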
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
__UpperCAmelCase : Optional[Any] = shaaaa(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
__UpperCAmelCase : List[str] = shaaaa(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
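# Illustration of the naming scheme above (a sketch): the cache filename is the
# hex digest of the URL, followed by "." plus the digest of the ETag when one is
# available, i.e. "<hash(url)>.<hash(etag)>", with ".h5" appended for .h5 URLs.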
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
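# Example of the extraction layout (a sketch, path hypothetical): with
# extract_compressed_file=True, "./model.zip" is unpacked into
# "./model-zip-extracted/" -- dots in the archive name become "-" and
# "-extracted" is appended, and a ".lock" file guards parallel extractions.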
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
            __UpperCAmelCase : int = req.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
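# Usage sketch (upstream name get_data assumed; path/URL hypothetical):
#
#   vocab = get_data("./objects_vocab.txt")        # local file: eval'd contents
#   vocab = get_data("https://example.com/vocab")  # remote: JSON, else text split on newlines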
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as stream:
__UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
__UpperCAmelCase : Dict = weights.pop("""model""" )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
if "running_var" in k:
__UpperCAmelCase : Optional[int] = torch.tensor([0] )
__UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
__UpperCAmelCase : Any = zero
return new
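# Behavior sketch for the loader above: each Detectron-style "running_var" key
# gains a zeroed "num_batches_tracked" companion so the weights load into
# torch.nn.BatchNorm modules, e.g. (key name hypothetical)
#   "backbone.bn.running_var" -> also emits "backbone.bn.num_batches_tracked": tensor([0])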
def lowerCamelCase__ ( ):
    # base path assumed: this file's directory (the original constant is not shown here)
    print(f"""{os.path.abspath(os.path.join(os.path.dirname(__file__ ) , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="RGB" ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
__UpperCAmelCase : List[str] = cva.imread(__lowerCamelCase )
else:
__UpperCAmelCase : int = get_image_from_url(__lowerCamelCase )
assert img is not None, f"""could not connect to: {im}"""
__UpperCAmelCase : Any = cva.cvtColor(__lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Optional[int] = img[:, :, ::-1]
return img
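# Usage sketch (path hypothetical): the helper reads BGR pixels via OpenCV
# (from disk or URL), converts BGR -> RGB, then reverses the channel order once
# more when input_format == "RGB", matching what the downstream model expects.
#
#   arr = img_tensorize("./demo.jpg", input_format="RGB")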
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
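# Example for the batching generator above (upstream name chunk assumed):
#   list(chunk([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]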
| 63 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( __lowerCamelCase : Dict="" ):
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
return os.path.join(__lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : str = torch.rand(12 , dtype=torch.floataa ) - 0.5
__UpperCAmelCase : Union[str, Any] = AgentAudio(__lowercase )
__UpperCAmelCase : Any = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowercase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__lowercase ) )
# Ensure that the file contains the same value as the original tensor
__UpperCAmelCase , __UpperCAmelCase : List[str] = sf.read(__lowercase )
self.assertTrue(torch.allclose(__lowercase , torch.tensor(__lowercase ) , atol=1e-4 ) )
def UpperCAmelCase ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__UpperCAmelCase : List[str] = get_new_path(suffix=""".wav""" )
sf.write(__lowercase , __lowercase , 16000 )
__UpperCAmelCase : str = AgentAudio(__lowercase )
self.assertTrue(torch.allclose(__lowercase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , __lowercase )
@require_vision
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : int ) -> int:
__UpperCAmelCase : List[Any] = torch.randint(0 , 256 , (64, 64, 3) )
__UpperCAmelCase : List[str] = AgentImage(__lowercase )
__UpperCAmelCase : Dict = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowercase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__UpperCAmelCase : Union[str, Any] = Image.open(__lowercase )
__UpperCAmelCase : Any = AgentImage(__lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase : str = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__UpperCAmelCase : Any = Image.open(__lowercase )
__UpperCAmelCase : Any = AgentImage(__lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Tuple ) -> List[str]:
__UpperCAmelCase : Optional[int] = """Hey!"""
__UpperCAmelCase : int = AgentText(__lowercase )
self.assertEqual(__lowercase , agent_type.to_string() )
self.assertEqual(__lowercase , agent_type.to_raw() )
self.assertEqual(__lowercase , __lowercase )
| 63 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Tuple = num_choices
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Any ) -> List[str]:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : int = True
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
a : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
__UpperCAmelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowercase )
| 63 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a : Optional[int] = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = 'linear'
a : int = 'cosine'
a : Optional[Any] = 'cosine_with_restarts'
a : Dict = 'polynomial'
a : Tuple = 'constant'
a : Dict = 'constant_with_warmup'
a : Any = 'piecewise_constant'
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int = -1 ):
return LambdaLR(__lowerCamelCase , lambda __lowerCamelCase : 1 , last_epoch=__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1.0 , __lowerCamelCase ) )
return 1.0
return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : str , __lowerCamelCase : int = -1 ):
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : Tuple = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = rule_str.split(""":""" )
__UpperCAmelCase : Any = int(__lowerCamelCase )
__UpperCAmelCase : List[str] = float(__lowerCamelCase )
__UpperCAmelCase : int = value
__UpperCAmelCase : Any = float(rule_list[-1] )
def create_rules_function(__lowerCamelCase : Dict , __lowerCamelCase : List[Any] ):
def rule_func(__lowerCamelCase : int ) -> float:
__UpperCAmelCase : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase : str = create_rules_function(__lowerCamelCase , __lowerCamelCase )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , last_epoch=__lowerCamelCase )
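# Format of `step_rules` for the piecewise-constant schedule above (values
# hypothetical): comma-separated "step:multiplier" pairs plus a trailing
# multiplier used past the last boundary. "1000:1.0,2000:0.5,0.1" keeps the
# base lr before step 1000, halves it before step 2000, then decays to 10%.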
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=-1 ):
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0.5 , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : Dict ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
__UpperCAmelCase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
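# Worked example of the cosine factor above: halfway through decay
# (progress = 0.5, num_cycles = 0.5) the multiplier is
#   0.5 * (1 + cos(pi * 2 * 0.5 * 0.5)) = 0.5 * (1 + cos(pi / 2)) = 0.5,
# falling smoothly from 1.0 after warmup to 0.0 at the final step.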
def lowerCamelCase__ ( __lowerCamelCase : Optimizer , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 ):
def lr_lambda(__lowerCamelCase : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=1E-7 , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : int=-1 ):
__UpperCAmelCase : Tuple = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCamelCase ) / float(max(1 , __lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase : Optional[Any] = lr_init - lr_end
__UpperCAmelCase : Union[str, Any] = num_training_steps - num_warmup_steps
__UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
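# Note on the polynomial schedule above: LambdaLR multiplies the base lr by the
# lambda, hence the division by lr_init. With power=1.0 the post-warmup decay is
# a straight line from lr_init down to lr_end (e.g. 1e-3 -> 1e-7, values hypothetical).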
a : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( __lowerCamelCase : Union[str, SchedulerType] , __lowerCamelCase : Optimizer , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : int = -1 , ):
__UpperCAmelCase : Union[str, Any] = SchedulerType(__lowerCamelCase )
__UpperCAmelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowerCamelCase , last_epoch=__lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowerCamelCase , step_rules=__lowerCamelCase , last_epoch=__lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowerCamelCase , num_warmup_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , num_cycles=__lowerCamelCase , last_epoch=__lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , power=__lowerCamelCase , last_epoch=__lowerCamelCase , )
return schedule_func(
__lowerCamelCase , num_warmup_steps=__lowerCamelCase , num_training_steps=__lowerCamelCase , last_epoch=__lowerCamelCase )
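# Usage sketch (public name get_scheduler assumed, as in the upstream library;
# optimizer and step counts hypothetical):
#
#   opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   sched = get_scheduler("cosine", opt, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       opt.step()
#       sched.step()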
| 63 | 1 |
from ...configuration_utils import PretrainedConfig
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''bert-generation'''
def __init__( self , __lowerCAmelCase=5_0_3_5_8 , __lowerCAmelCase=1_0_2_4 , __lowerCAmelCase=2_4 , __lowerCAmelCase=1_6 , __lowerCAmelCase=4_0_9_6 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :int = vocab_size
__magic_name__ :Tuple = hidden_size
__magic_name__ :Optional[int] = num_hidden_layers
__magic_name__ :str = num_attention_heads
__magic_name__ :Any = hidden_act
__magic_name__ :int = intermediate_size
__magic_name__ :Dict = hidden_dropout_prob
__magic_name__ :Dict = attention_probs_dropout_prob
__magic_name__ :str = max_position_embeddings
__magic_name__ :Optional[int] = initializer_range
__magic_name__ :Union[str, Any] = layer_norm_eps
__magic_name__ :Tuple = position_embedding_type
__magic_name__ :Dict = use_cache
| 0 |
from math import pi, sqrt
def lowerCamelCase__ ( __lowerCamelCase : float ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_7_1.5:
raise OverflowError("""math range error""" )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
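# Worked example (not in the original file): by the recurrence
# gamma(num) = (num - 1) * gamma(num - 1) anchored at gamma(0.5) = sqrt(pi),
#   gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ~= 3.3234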
def lowerCamelCase__ ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
a : Optional[int] = 1.0
while num:
a : List[str] = float(input("Gamma of: "))
print(f"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 63 | 0 |
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = 0
for i in range(1 , 10_01 ):
total += i**i
    return str(total )[-10:]
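# This computes Project Euler problem 48 ("self powers"): the last ten digits of
# 1**1 + 2**2 + ... + 1000**1000. Python's arbitrary-precision integers keep the
# sum exact; the expected result is "9110846700".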
if __name__ == "__main__":
print(solution())
| 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
"""simple docstring"""
a : int
a : Node | None = None
a : Node | None = None
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = Node(1 )
__UpperCAmelCase : int = Node(2 )
__UpperCAmelCase : Optional[Any] = Node(3 )
__UpperCAmelCase : Dict = Node(4 )
__UpperCAmelCase : Tuple = Node(5 )
return tree
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
__UpperCAmelCase : list[Any] = []
if root is None:
return output
__UpperCAmelCase : Tuple = deque([root] )
while process_queue:
__UpperCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowerCamelCase__ ( __lowerCamelCase : Node | None , __lowerCamelCase : int ):
__UpperCAmelCase : list[Any] = []
def populate_output(__lowerCamelCase : Node | None , __lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__lowerCamelCase , __lowerCamelCase )
return output
def lowerCamelCase__ ( __lowerCamelCase : Node | None , __lowerCamelCase : int ):
__UpperCAmelCase : list[Any] = []
def populate_output(__lowerCamelCase : Node | None , __lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__lowerCamelCase , __lowerCamelCase )
return output
def lowerCamelCase__ ( __lowerCamelCase : Node | None ):
if root is None:
return []
__UpperCAmelCase : list[Sequence[Node | None]] = []
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : int = height(__lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = 1
else:
output.append(get_nodes_from_right_to_left(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Optional[int] = 0
return output
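# Expected traversals for a tree shaped 1 -> (2 -> (4, 5), 3), as built by
# make_tree above (a sketch based on the definitions):
#   level_order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]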
def lowerCamelCase__ ( ): # Main function for testing.
__UpperCAmelCase : List[Any] = make_tree()
print(f"""In-order Traversal: {inorder(__lowerCamelCase )}""" )
print(f"""Pre-order Traversal: {preorder(__lowerCamelCase )}""" )
print(f"""Post-order Traversal: {postorder(__lowerCamelCase )}""" , """\n""" )
print(f"""Height of Tree: {height(__lowerCamelCase )}""" , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(__lowerCamelCase ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(__lowerCamelCase ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(__lowerCamelCase , level=__lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
a__ : Tuple = JukeboxTokenizer
a__ : Union[str, Any] = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def snake_case_ ( self : str ) -> Union[str, Any]:
import torch
_A = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
_A = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def snake_case_ ( self : Tuple ) -> Tuple:
import torch
_A = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
_A = tokenizer(**self.metas )['''input_ids''']
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 2 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 0 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def A_( A : str , A : str , A : str):
def get_masked_lm_array(A : str):
UpperCamelCase = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCamelCase = tf.train.load_variable(A , A)
if "kernel" in name:
UpperCamelCase = array.transpose()
return torch.from_numpy(A)
def get_encoder_array(A : str):
UpperCamelCase = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCamelCase = tf.train.load_variable(A , A)
if "kernel" in name:
UpperCamelCase = array.transpose()
return torch.from_numpy(A)
def get_encoder_layer_array(A : int , A : str):
UpperCamelCase = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCamelCase = tf.train.load_variable(A , A)
if "kernel" in name:
UpperCamelCase = array.transpose()
return torch.from_numpy(A)
def get_encoder_attention_layer_array(A : int , A : str , A : List[str]):
UpperCamelCase = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCamelCase = tf.train.load_variable(A , A)
UpperCamelCase = array.reshape(A)
if "kernel" in name:
UpperCamelCase = array.transpose()
return torch.from_numpy(A)
print(f'''Loading model based on config from {config_path}...''')
UpperCamelCase = BertConfig.from_json_file(A)
UpperCamelCase = BertForMaskedLM(A)
# Layers
for layer_index in range(0 , config.num_hidden_layers):
UpperCamelCase = model.bert.encoder.layer[layer_index]
# Self-attention
UpperCamelCase = layer.attention.self
UpperCamelCase = get_encoder_attention_layer_array(
A , '_query_dense/kernel' , self_attn.query.weight.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_query_dense/bias' , self_attn.query.bias.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_key_dense/kernel' , self_attn.key.weight.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_key_dense/bias' , self_attn.key.bias.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_value_dense/kernel' , self_attn.value.weight.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_value_dense/bias' , self_attn.value.bias.data.shape)
# Self-attention Output
UpperCamelCase = layer.attention.output
UpperCamelCase = get_encoder_attention_layer_array(
A , '_output_dense/kernel' , self_output.dense.weight.data.shape)
UpperCamelCase = get_encoder_attention_layer_array(
A , '_output_dense/bias' , self_output.dense.bias.data.shape)
UpperCamelCase = get_encoder_layer_array(A , '_attention_layer_norm/gamma')
UpperCamelCase = get_encoder_layer_array(A , '_attention_layer_norm/beta')
# Intermediate
UpperCamelCase = layer.intermediate
UpperCamelCase = get_encoder_layer_array(A , '_intermediate_dense/kernel')
UpperCamelCase = get_encoder_layer_array(A , '_intermediate_dense/bias')
# Output
UpperCamelCase = layer.output
UpperCamelCase = get_encoder_layer_array(A , '_output_dense/kernel')
UpperCamelCase = get_encoder_layer_array(A , '_output_dense/bias')
UpperCamelCase = get_encoder_layer_array(A , '_output_layer_norm/gamma')
UpperCamelCase = get_encoder_layer_array(A , '_output_layer_norm/beta')
# Embeddings
UpperCamelCase = get_encoder_array('_position_embedding_layer/embeddings')
UpperCamelCase = get_encoder_array('_type_embedding_layer/embeddings')
UpperCamelCase = get_encoder_array('_embedding_norm_layer/gamma')
UpperCamelCase = get_encoder_array('_embedding_norm_layer/beta')
# LM Head
UpperCamelCase = model.cls.predictions.transform
UpperCamelCase = get_masked_lm_array('dense/kernel')
UpperCamelCase = get_masked_lm_array('dense/bias')
UpperCamelCase = get_masked_lm_array('layer_norm/gamma')
UpperCamelCase = get_masked_lm_array('layer_norm/beta')
UpperCamelCase = get_masked_lm_array('embedding_table')
# Pooling
UpperCamelCase = BertPooler(config=A)
UpperCamelCase = get_encoder_array('_pooler_layer/kernel')
UpperCamelCase = get_encoder_array('_pooler_layer/bias')
# Export final model
model.save_pretrained(A)
# Integration test - should load without any errors ;)
UpperCamelCase = BertForMaskedLM.from_pretrained(A)
print(new_model.eval())
    print('Model conversion was done successfully!')
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
lowerCAmelCase : str = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
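# Example invocation (script name and paths hypothetical):
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path ./tf2_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_dump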
| 3 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **__lowercase : Dict ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase : List[Any] = deprecated_arg[3:]
setattr(self , __lowercase , not kwargs.pop(__lowercase ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__UpperCAmelCase : str = kwargs.pop("""torchscript""" , self.torchscript )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**__lowercase )
a : bool = field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
a : bool = field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
a : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple["torch.device", int]:
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
__UpperCAmelCase : str = torch.device("""cpu""" )
__UpperCAmelCase : int = 0
elif is_torch_tpu_available():
__UpperCAmelCase : Tuple = xm.xla_device()
__UpperCAmelCase : int = 0
else:
__UpperCAmelCase : Dict = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__UpperCAmelCase : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase ( self : List[str] ) -> int:
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase ( self : int ) -> "torch.device":
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
return self.n_gpu > 0
| 63 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = F'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = F'class {class_name}('
lowerCAmelCase = F'{4 * " "}def {test_name}('
lowerCAmelCase = F'{8 * " "}{correct_line.split()[0]}'
lowerCAmelCase = F'{16 * " "}{correct_line.split()[0]}'
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = []
for line in lines:
if line.startswith(_UpperCAmelCase ):
lowerCAmelCase = True
elif in_class and line.startswith(_UpperCAmelCase ):
lowerCAmelCase = True
elif in_class and in_func and (line.startswith(_UpperCAmelCase ) or line.startswith(_UpperCAmelCase )):
lowerCAmelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCAmelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCAmelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'{spaces * " "}{correct_line}' )
lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = False
else:
new_lines.append(_UpperCAmelCase )
with open(_UpperCAmelCase , 'w' ) as f:
for line in new_lines:
f.write(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=None ):
if fail is not None:
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = {l.strip() for l in f.readlines()}
else:
lowerCAmelCase = None
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = defaultdict(_UpperCAmelCase )
for line in correct_lines:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__UpperCamelCase : Dict = parser.parse_args()
main(args.correct_filename, args.fail_filename)
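# Expected input formats (a sketch, values hypothetical):
#   --correct_filename: one semicolon-separated entry per line, parsed above as
#     file;class;test;correct_line, e.g.
#     tests/test_modeling_bert.py;BertModelTest;test_forward;self.assertEqual(out.shape, (1, 8, 32))
#   --fail_filename: one "file::class::test" identifier per line to restrict overwrites.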
| 4 |
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 0 |
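# The cell below runs the `examples/pytorch` GLUE script on TPU through `xla_spawn` and
# checks eval accuracy and wall-clock time from the emitted `all_results.json`.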
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)

        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
        """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 5 |
from __future__ import annotations

seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 0 |
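# The cell below converts a fairseq M2M100 checkpoint to the Hugging Face format: it strips
# bookkeeping keys, rebuilds the config from the fairseq args, and ties the LM head to the
# shared embeddings. Example invocation (hypothetical paths):
#     python convert_m2m100.py ./checkpoints/model.pt ./m2m100-hf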
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 6 |
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 0 |
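# The cell below converts a binary string to octal by zero-padding to a multiple of three
# bits and mapping each 3-bit group to one octal digit, e.g. "1111" -> "001 111" -> "17".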
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
_A = ''
while len(_snake_case ) % 3 != 0:
_A = '0' + bin_string
_A = [
bin_string[index : index + 3]
for index in range(len(_snake_case ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_A = 0
for index, val in enumerate(_snake_case ):
oct_val += int(2 ** (2 - index) * int(_snake_case ) )
oct_string += str(_snake_case )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 7 |
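# The cell below implements interpolation search (iterative and recursive). Instead of
# probing the midpoint, it estimates the position from the value distribution:
#     point = left + (item - a[left]) * (right - left) // (a[right] - a[left])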
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending sorted `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 63 | 0 |
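# The cell below contains the pipeline tests for document question answering, covering
# LayoutLMv2, LayoutLM and Donut checkpoints, with and without precomputed OCR word boxes.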
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 8 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
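# The cell below solves the fractional knapsack problem greedily by value/weight ratio;
# e.g. values [60, 100, 120], weights [10, 20, 30] and capacity 50 give a maximum of 240
# (items 1 and 2 taken whole, two thirds of item 3).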
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 9 |
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 0 |
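# The cell below is Project Euler 48: the last ten digits of 1^1 + 2^2 + ... + 1000^1000.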
def solution():
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
| 10 |
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 63 | 0 |
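# The cell below isolates the decimal part of a number, optionally rounding it to a fixed
# number of digits; the `__main__` block prints a handful of sample calls.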
def decimal_isolate(number, digit_amount):
    """Isolate the decimal part of `number`; when `digit_amount` > 0, round it to that many digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 0 |
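# The cell below is Project Euler 15: the number of lattice routes through an n x n grid
# is the central binomial coefficient C(2n, n) = (2n)! / (n! * n!).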
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 12 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
| 13 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 63 | 0 |
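# The cell below defines the DETR configuration: model hyperparameters, the choice between
# a timm and a Hugging Face backbone, Hungarian-matcher costs and loss coefficients, plus
# ONNX export metadata.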
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 14 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 0 |
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 15 |
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 0 |
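# The cell below runs A* over a small numpy grid world; the heuristic `h` is the squared
# Euclidean distance from a neighbour to the goal.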
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        neighbour_coords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_coords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
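    # A second query on a larger grid (added illustration; reuses the start cell).
    big_world = Gridworld(world_size=(8, 8))
    big_goal = Cell()
    big_goal.position = (7, 7)
    print(astar(big_world, start, big_goal))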
| 17 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for the original OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
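# A brief usage sketch (added; not part of the original module):
if __name__ == "__main__":
    config = OpenAIGPTConfig(n_layer=6)
    # "num_hidden_layers" resolves to "n_layer" through attribute_map
    print(config.num_hidden_layers)  # 6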
| 63 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_digits: int = 2) -> int:
    """
    Returns the denominator, in lowest terms, of the product of the
    non-trivial digit-cancelling fractions.
    """
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
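    # Added illustration (not in the original): the four two-digit
    # digit-cancelling fractions found by fraction_list.
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']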
| 18 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        # boolean scheduler flags restored to match the upstream diffusers test (assumed)
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.00_085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="""epsilon""", thresholding=False, )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((256, 256))
        # create mask (the masked-out quadrant is assumed from the upstream test)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""")
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
        # the masked strip is assumed from the upstream test
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
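# Added convenience runner (not in the original file); the fast tests above run
# on CPU, while the integration test needs a GPU and network access.
if __name__ == "__main__":
    unittest.main()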
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('''Tree is: ''')
    display(tree)
if __name__ == "__main__":
main()
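    # Added illustration (not in the original): a three-node tree is full and has depth 2.
    small = Node(10)
    small.left = Node(20)
    small.right = Node(30)
    print(is_full_binary_tree(small))  # True
    print(depth_of_tree(small))  # 2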
| 19 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(""",""")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(""",""")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, """rb""") as f:
        ckp = pkl.load(f)["""model"""]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(""".""")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), """.""".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"""{file_name}""", """w""") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"""{file_name}""", """w""") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = """    """
        if self._name != "root":
            r = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = """"""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(v).__name__})\n"""
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("""cache_dir""", None)
        force_download = kwargs.pop("""force_download""", False)
        resume_download = kwargs.pop("""resume_download""", False)
        proxies = kwargs.pop("""proxies""", None)
        local_files_only = kwargs.pop("""local_files_only""", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_dict = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = """Can't load config for"""
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("""loading configuration file from path""")
        else:
            print("""loading configuration file cache""")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("""dump.pt""", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.0_1, atol=0.1), (
        f"""{sum([1 for x in np.isclose(n1, n2, rtol=0.0_1, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = """/""" not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = """python/{}""".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("""{}/{}""".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="""B""", unit_scale=True, total=total, initial=resume_size, desc="""Downloading""", )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("""ETag""")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + """.*""")
                if not file.endswith(""".json""") and not file.endswith(""".lock""")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + """.lock"""
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + """.incomplete"""

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, """a+b""") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""", url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {"""url""": url, """etag""": etag}
        meta_path = cache_path + """.json"""
        with open(meta_path, """w""") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("""utf-8""")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(""".h5"""):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(""".""", """-""") + """-extracted"""
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, """r""") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("""\n""")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("""/""")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, """rb""") as stream:
        weights = pkl.load(stream)
    model = weights.pop("""model""")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("""running_var""", """num_batches_tracked""")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"""{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb""")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"""could not connect to: {im}"""
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
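# Quick illustration of two helpers above (added; not part of the original module):
if __name__ == "__main__":
    print(url_to_filename("https://example.com/weights.h5"))  # sha256 digest with the ".h5" suffix kept
    print(list(chunk(list(range(5)), batch=2)))  # [[0, 1], [2, 3], [4]]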
| 63 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            feature_extractor = AutoFeatureExtractor.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            feature_extractor = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            feature_extractor = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor')

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor, 'is_local'))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 20 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX Algorithm for min Vertex Cover
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
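    # Added illustration (not in the original): a star graph is covered by its centre alone.
    print(greedy_min_vertex_cover({0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}))  # {0}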
| 21 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(""":""")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
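# A small usage sketch (added illustration; any torch optimizer works):
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # still warming up: 3/10 of the base lr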
| 63 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
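    # Added spot-check (not in the original): by the recursion above,
    # gamma(3.5) equals 2.5 * 1.5 * 0.5 * sqrt(pi) up to rounding.
    assert abs(gamma(3.5) - 2.5 * 1.5 * 0.5 * sqrt(pi)) < 1e-10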
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"""gamma({num}) = {gamma(num)}""")
        print("\nEnter 0 to exit...")
| 63 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def _UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def _UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
UpperCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
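# For reference, a minimal end-to-end run of the pretrained checkpoint exercised by the
# slow test above (model id taken directly from the test; CPU works but is slow):
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#     generator = torch.Generator(device="cpu").manual_seed(42)
#     output = pipe(generator=generator)
#     audio, image = output.audios[0], output.images[0]  # waveform + mel-spectrogram image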
| 23 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
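# Expected output for the sample tree built by make_tree() (1 at the root, 2 and 3 as
# its children, 4 and 5 under 2), worked out by hand from the definitions above:
#   In-order:    [4, 2, 5, 1, 3]
#   Pre-order:   [1, 2, 4, 5, 3]
#   Post-order:  [4, 5, 2, 3, 1]
#   Level order: [1, 2, 3, 4, 5]
#   ZigZag:      [[1], [3, 2], [4, 5]]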
| 63 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to build a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to the given argument parser."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
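# A typical invocation of the subcommand registered above, using only the flags the
# parser defines (paths are illustrative placeholders):
#
#     transformers-cli convert --model_type bert \
#         --tf_checkpoint /path/to/bert_model.ckpt \
#         --config /path/to/bert_config.json \
#         --pytorch_dump_output /path/to/pytorch_dump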
| 24 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_input(self):
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
    def test_token_type_ids(self):
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
    def test_prefix_tokens(self):
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
    def test_batch_encode(self):
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
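# The prefix/input split exercised by the slow tests above is the tokenizer's main
# quirk: prefix tokens are marked 1 in token_type_ids and input tokens 0, with a SEG
# token at the boundary. A minimal sketch, assuming the pretrained checkpoint is available:
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     encoded = tokenizer("いワ", prefix_text="あン")
#     print(encoded.input_ids, encoded.token_type_ids)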
| 63 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
    main()
| 25 |
| 63 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
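# For reference, the list-of-dicts construction exercised above in its simplest form:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#     print(ds.column_names)  # ['col_1', 'col_2']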
| 26 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
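# The tests above rely on a `sqlite_path` fixture pointing at a database that already
# contains a `dataset` table. A minimal sketch of such a fixture (the column layout is
# inferred from the expected col_1/col_2/col_3 features and is an assumption here):
@pytest.fixture
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        cur.executemany(
            "INSERT INTO dataset VALUES (?, ?, ?)",
            [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
        )
        con.commit()
    return path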
| 63 | 0 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"{solution() = }")
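# Worked check for a small n: with n = 10 the sum of the squares is 385 and the square
# of the sum is 55**2 = 3025, so the difference is 3025 - 385 = 2640:
#     assert solution(10) == 2640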
| 27 |
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
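# Sanity check against a known value: the circular primes below 100 are
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, i.e. thirteen of them:
#     assert len(find_circular_primes(100)) == 13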
| 63 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' ,__UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
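# A typical launch of this trainer script (paths and the task name are illustrative;
# the flags map onto the dataclass fields defined above plus standard TrainingArguments):
#
#     python run_multiple_choice.py \
#         --task_name swag --model_name_or_path bert-base-uncased \
#         --data_dir ./swag_data --output_dir ./swag_out \
#         --max_seq_length 128 --do_train --do_eval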
| 28 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : Tuple = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the Transformers structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
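# Example invocation of the converter above, using the argparse defaults it declares
# (the script name and source checkpoint path are illustrative placeholders):
#
#     python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json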
| 63 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
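# Worked example tying the helpers together (values checked by hand):
if __name__ == "__main__":
    doc = "one fish two fish"
    corpus = "one fish\ntwo fish\nred fish\nblue fish"
    tf = term_frequency("fish", doc)                         # 2
    df, n = document_frequency("fish", corpus)               # (4, 4)
    idf = inverse_document_frequency(df, n, smoothing=True)  # round(1 + log10(4 / 5), 3) = 0.903
    print(tf_idf(tf, idf))                                   # 1.806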
| 29 |
def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
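# Quick checks against the demo collection above: a present element resolves to its
# index, a missing one to None:
#     interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66)  # -> 5
#     interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 67)  # -> None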
| 63 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__a = object()
# For specifying empty leaf dict `{}`
__a = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force qs to be a tuple
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 30 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0,
        depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0,
        hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5,
        initializer_range=0.02, **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 31 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
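# This is the diffusers IPNDM scheduler (a fourth-order pseudo linear multistep method).
# A minimal denoising-loop sketch, assuming `model` returns the noise residual:
#
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample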
| 63 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range, attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : List[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__A : Union[str, Any] = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__A : int = False
__A : List[str] = False
__A : int = False
__A : str = False
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerModelTester(self )
_UpperCAmelCase = ConfigTester(
self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
_UpperCAmelCase = copy.deepcopy(_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TimesformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
if not self.has_attentions:
pass
else:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.seq_length
_UpperCAmelCase = self.model_tester.num_frames
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_UpperCAmelCase = len(_UpperCamelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def UpperCamelCase( self ):
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_UpperCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def A__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
_UpperCAmelCase = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 32 |
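# Worked numbers for the token bookkeeping the tests above depend on,
# using the TimesformerModelTester defaults rather than a real checkpoint.
image_size, patch_size, num_frames = 10, 2, 2
patches_per_frame = (image_size // patch_size) ** 2  # 25 spatial patches per frame
seq_length = num_frames * patches_per_frame + 1      # 51 tokens, CLS included
# Divided space-time attention is computed per frame, so its maps are
# square in the per-frame token count (spatial patches + CLS):
per_frame_tokens = seq_length // num_frames + 1      # 26 == patches_per_frame + 1
print(patches_per_frame, seq_length, per_frame_tokens)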
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 63 | 0 |
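# The CLI above uses the argparse subcommand pattern: every command
# registers its own subparser and stores a factory under args.func.
# A minimal, self-contained sketch of that pattern; the command names
# are illustrative, not the transformers-cli surface.
from argparse import ArgumentParser

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo", help="print a message")
        parser.add_argument("message")
        parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)

def main():
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo-cli command helpers")
    EchoCommand.register_subcommand(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    service = args.func(args)  # the stored factory builds the command object
    service.run()

if __name__ == "__main__":
    main()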
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 0 |
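# The __init__ scaffolding above defers heavy submodule imports through
# _LazyModule. Below is a minimal sketch of the underlying idea; the
# module and attribute names are stand-ins, not the transformers code.
import importlib

class LazyModule:
    """Defers importing until an exported attribute is first accessed."""

    def __init__(self, import_structure):
        # import_structure: {module_name: [attribute, ...]}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self._cache = {}

    def __getattr__(self, name):
        if name in self._attr_to_module:
            if name not in self._cache:
                module = importlib.import_module(self._attr_to_module[name])
                self._cache[name] = getattr(module, name)
            return self._cache[name]
        raise AttributeError(name)

lazy = LazyModule({"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(16.0))     # math is imported only here
print(lazy.dumps([1, 2]))  # json is imported only here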
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''layoutlmv3'''
def __init__( self , lowerCamelCase_=5_0_2_6_5 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_2_8 , lowerCamelCase_=True , lowerCamelCase_=3_2 , lowerCamelCase_=1_2_8 , lowerCamelCase_=6_4 , lowerCamelCase_=2_5_6 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=2_2_4 , lowerCamelCase_=3 , lowerCamelCase_=1_6 , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = max_ad_position_embeddings
UpperCamelCase = coordinate_size
UpperCamelCase = shape_size
UpperCamelCase = has_relative_attention_bias
UpperCamelCase = rel_pos_bins
UpperCamelCase = max_rel_pos
UpperCamelCase = has_spatial_attention_bias
UpperCamelCase = rel_ad_pos_bins
UpperCamelCase = max_rel_ad_pos
UpperCamelCase = text_embed
UpperCamelCase = visual_embed
UpperCamelCase = input_size
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = classifier_dropout
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = version.parse('''1.12''' )
@property
def UpperCAmelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
])
@property
def UpperCAmelCase__ ( self) -> float:
return 1e-5
@property
def UpperCAmelCase__ ( self) -> int:
return 1_2
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = -1 , lowerCamelCase_ = -1 , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = 3 , lowerCamelCase_ = 4_0 , lowerCamelCase_ = 4_0 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_)
UpperCamelCase = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_)
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [[''' '''.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ))
        return inputs
| 34 |
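# A plausible reconstruction of compute_effective_axis_dimension as used
# above (a sketch matching the call sites, not the library source): a
# dynamic axis (-1) is exported with a small fixed size so the ONNX
# tracer cannot specialize on it, minus any special tokens added later.
def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # dynamic axis requested
        dimension = fixed_dimension
    return dimension - num_token_to_add

batch_size = effective_axis_dimension(-1, fixed_dimension=2)                       # -> 2
seq_length = effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)   # -> 6
print(batch_size, seq_length)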
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
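# The GPT-SW3 __init__ above guards its tokenizer behind a sentencepiece
# availability check. A self-contained sketch of that guard; the
# exception class is a local stand-in, and the final import assumes a
# transformers install recent enough to export GPTSw3Tokenizer.
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_package_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

try:
    if not is_package_available("sentencepiece"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    GPTSw3Tokenizer = None  # the exported name exists either way; the feature is just disabled
else:
    from transformers import GPTSw3Tokenizer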
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
a_ :Tuple = None
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a_ :List[Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
a_ :Tuple = {
'camembert-base': 5_12,
}
a_ :Dict = '▁'
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Any = ['''input_ids''', '''attention_mask''']
lowerCamelCase : Tuple = CamembertTokenizer
def __init__( self : int , _lowercase : int=None , _lowercase : List[str]=None , _lowercase : Optional[int]="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Tuple="</s>" , _lowercase : str="<s>" , _lowercase : Tuple="<unk>" , _lowercase : str="<pad>" , _lowercase : Dict="<mask>" , _lowercase : List[str]=["<s>NOTUSED", "</s>NOTUSED"] , **_lowercase : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = False if not self.vocab_file else True
def lowercase__ ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Any , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 35 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num via the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 0 |
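# A minimal sketch of the special-token layout the CamembertTokenizerFast
# methods above build: <s> A </s> for a single sequence and
# <s> A </s></s> B </s> for a pair. Token ids below are invented for
# illustration; real ids come from the tokenizer's vocabulary.
CLS, SEP = 0, 2

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

print(build_inputs_with_special_tokens([10, 11]))        # [0, 10, 11, 2]
print(build_inputs_with_special_tokens([10, 11], [20]))  # [0, 10, 11, 2, 2, 20, 2]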
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowercase ( __A : Union[str, Any] , __A : Any , __A : Any=0 ) -> Any:
'''simple docstring'''
if name is None:
snake_case : str = None
else:
snake_case : int = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
snake_case : Tuple = fmt.format(__A )
# Print and recurse (if needed).
if isinstance(__A , __A ):
if msg is not None:
print(__A )
for k in val.keys():
recursive_print(__A , val[k] , spaces + 2 )
elif isinstance(__A , torch.Tensor ):
print(__A , """:""" , val.size() )
else:
print(__A , """:""" , __A )
def lowercase ( __A : Any , __A : Dict , __A : str , __A : Tuple , __A : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : int = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case : Optional[int] = param.view(*__A )
snake_case : Dict = param.transpose(0 , 2 )
snake_case : int = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case : Union[str, Any] = param.view(*__A )
snake_case : Dict = param.transpose(0 , 1 ).contiguous()
snake_case : Any = param.view(*__A )
return param
def lowercase ( __A : str , __A : Dict , __A : str ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = {}
# old versions did not store training args
snake_case : int = input_state_dict.get("""args""" , __A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case : Dict = ds_args.padded_vocab_size
snake_case : Dict = ds_args.max_position_embeddings
snake_case : Optional[Any] = ds_args.hidden_size
snake_case : List[str] = ds_args.num_layers
snake_case : str = ds_args.num_attention_heads
snake_case : Optional[Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case : List[str] = config.n_head
# The hidden_size per head.
snake_case : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case : List[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case : int = 0.0
# The model.
snake_case : Optional[int] = input_state_dict["""model"""]
# The language model.
snake_case : Union[str, Any] = model["""language_model"""]
# The embeddings.
snake_case : List[str] = lm["""embedding"""]
# The word embeddings.
snake_case : Any = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case : Union[str, Any] = word_embeddings[: config.vocab_size, :]
snake_case : str = word_embeddings
# The position embeddings.
snake_case : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case : Optional[Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
snake_case : Tuple = pos_embeddings
# The transformer.
snake_case : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case : str = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case : Optional[Any] = layer_re.match(__A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case : List[str] = int(m.group(1 ) )
# The name of the operation.
snake_case : Any = m.group(2 )
# Is it a weight or a bias?
snake_case : Any = m.group(3 )
# The name of the layer.
snake_case : Tuple = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case : Any = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case : Any = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __A , __A )
snake_case : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case : Optional[int] = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case : Any = masked_bias
snake_case : Optional[Any] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case : List[str] = fix_query_key_value_ordering(__A , __A , 3 , __A , __A )
# Store. No change of shape.
snake_case : List[str] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case : str = megatron_to_transformers[op_name]
snake_case : Tuple = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case : List[Any] = megatron_to_transformers[op_name]
snake_case : int = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case : List[Any] = transformer["""final_layernorm.weight"""]
snake_case : Any = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case : List[Any] = word_embeddings
# It should be done!
return output_state_dict
def lowercase ( ) -> Any:
'''simple docstring'''
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__A , help="""An optional config json file describing the pre-trained model.""" , )
snake_case : List[Any] = parser.parse_args()
# Extract the basename.
snake_case : Tuple = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case : List[str] = torch.load(__A , map_location="""cpu""" )
else:
snake_case : int = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case : Dict = input_state_dict.get("""args""" , __A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case : int = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case : Union[str, Any] = """gelu_new"""
else:
snake_case : int = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case : str = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case : List[Any] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__A , summary_activation=__A , summary_proj_to_labels=__A , summary_first_dropout=0.1 , scale_attn_weights=__A , use_cache=__A , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
snake_case : int = GPTaConfig.from_json_file(args.config_file )
snake_case : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case : str = convert_megatron_checkpoint(__A , __A , __A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__A , __A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case : Tuple = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
snake_case : Any = """gpt2"""
snake_case : List[str] = AutoTokenizer.from_pretrained(__A )
snake_case : Optional[Any] = type(__A ).__name__
snake_case : Tuple = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__A )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(__A )
# Store the state_dict to file.
snake_case : List[str] = os.path.join(__A , """pytorch_model.bin""" )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(__A , __A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 36 |
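# A tiny torch demonstration of the checkpoint_version >= 2.0 branch of
# fix_query_key_value_ordering above: rows packed as
# (heads, splits, head_dim) are reordered to (splits, heads, head_dim).
# The sizes are toy values chosen for illustration.
import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)

out = param.view(num_heads, num_splits, head_dim, cols)
out = out.transpose(0, 1).contiguous()
out = out.view(num_heads * num_splits * head_dim, cols)

print(out[0].equal(param[0]))                             # (head 0, split 0) stays first
print(out[head_dim].equal(param[num_splits * head_dim]))  # next block is now (head 1, split 0)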
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 63 | 0 |
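# The GIT config above nests a vision sub-config inside the text config,
# defaulting it when absent and flattening it back out in to_dict().
# A minimal, self-contained sketch of that composition pattern; names
# and defaults are illustrative.
import copy

class VisionConfig:
    def __init__(self, hidden_size=768, patch_size=16):
        self.hidden_size = hidden_size
        self.patch_size = patch_size

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    model_type = "demo"

    def __init__(self, vision_config=None, vocab_size=30522):
        if vision_config is None:
            vision_config = {}  # fall back to the sub-config defaults
        self.vision_config = VisionConfig(**vision_config)
        self.vocab_size = vocab_size

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()  # flatten the sub-config
        output["model_type"] = type(self).model_type
        return output

print(CompositeConfig().to_dict())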
from math import ceil
def solution(n: int = 1_001) -> int:
    """Sum of the numbers on the diagonals of an n-by-n number spiral."""
    total = 1  # the centre cell
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even  # the four corners of ring i
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 37 |
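# A quick brute-force check of the per-ring corner sum used in solution()
# above: ring i of the spiral has corners (2*i + 1)**2 - k * 2*i for
# k = 0..3, so the four corners add up to 4 * (2*i + 1)**2 - 12 * i,
# i.e. 4 * odd**2 - 6 * even with even = 2 * i.
for i in range(1, 4):
    corners = [(2 * i + 1) ** 2 - k * 2 * i for k in range(4)]
    assert sum(corners) == 4 * (2 * i + 1) ** 2 - 12 * i
    print(i, corners, sum(corners))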
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 0 |
"""Prim's algorithm for minimum spanning trees (list-based and heap-based)."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex with a key (best edge weight seen so far) and parent pointer."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """O(V^2) Prim: repeatedly extract the minimum-key vertex from a plain list."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """O(E log V) Prim: keep candidates in a heap, re-heapifying after key updates."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder kept for doctest discovery."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 38 |
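# A self-contained usage sketch in the spirit of the heap-based Prim
# above, but using lazy deletion instead of re-heapifying after key
# updates; the small weighted graph below is invented for illustration.
import heapq

def prim_mst_weight(adj, start):
    # adj maps a node to a list of (weight, neighbor) pairs
    visited, total = {start}, 0
    heap = list(adj[start])
    heapq.heapify(heap)
    while heap and len(visited) < len(adj):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry left behind by lazy deletion
        visited.add(node)
        total += weight
        for edge in adj[node]:
            heapq.heappush(heap, edge)
    return total

adj = {
    1: [(5, 2), (11, 3)],
    2: [(5, 1), (3, 3)],
    3: [(11, 1), (3, 2)],
}
print(prim_mst_weight(adj, 1))  # 8 = edge (1,2) of weight 5 + edge (2,3) of weight 3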
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 0 |
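# A hand-check of the demo values used in main() above: with the
# maximizer at the root and min/max levels alternating, the tree
# evaluates to 65.
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
layer = [max(scores[i], scores[i + 1]) for i in range(0, 8, 2)]  # depth 2 (max): [90, 33, 65, 34423]
layer = [min(layer[i], layer[i + 1]) for i in range(0, 4, 2)]    # depth 1 (min): [33, 65]
print(max(layer))  # root (max) -> 65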
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_ = logging.getLogger()
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
snake_case_ = parser.parse_args()
return args.f
class snake_case_ ( __A ):
'''simple docstring'''
def snake_case__( self : Dict ) ->None:
snake_case_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
def snake_case__( self : List[str] , _UpperCamelCase : Any ) ->Dict:
snake_case_ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(_UpperCamelCase , '''argv''' , _UpperCamelCase ):
snake_case_ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_UpperCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def snake_case__( self : Union[str, Any] ) ->Union[str, Any]:
snake_case_ = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(_UpperCamelCase )
snake_case_ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(_UpperCamelCase )
snake_case_ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(_UpperCamelCase )
| 39 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = 'openai-gpt'
a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Tuple=40478 , __lowercase : Tuple=512 , __lowercase : int=768 , __lowercase : Dict=12 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[Any]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=1e-5 , __lowercase : Any=0.02 , __lowercase : List[str]="cls_index" , __lowercase : str=True , __lowercase : Dict=None , __lowercase : str=True , __lowercase : List[str]=0.1 , **__lowercase : List[Any] , ) -> List[Any]:
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = n_positions
__UpperCAmelCase : Optional[int] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Tuple = afn
__UpperCAmelCase : Any = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : str = layer_norm_epsilon
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = summary_type
__UpperCAmelCase : Optional[Any] = summary_use_proj
__UpperCAmelCase : List[Any] = summary_activation
__UpperCAmelCase : Union[str, Any] = summary_first_dropout
__UpperCAmelCase : Dict = summary_proj_to_labels
super().__init__(**__lowercase )
| 63 | 0 |
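# The attribute_map above lets external names such as hidden_size
# resolve to internal ones such as n_embd. A minimal sketch reproducing
# that aliasing with __getattr__; the defaults are illustrative.
class AliasedConfig:
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, n_positions=512, n_embd=768, n_head=12, n_layer=12):
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_head = n_head
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only reached when normal lookup fails, so aliases land here.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.n_embd)  # 768 768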
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
UpperCAmelCase__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : int = self.task_name.lower()
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = "train"
UpperCAmelCase__ : List[Any] = "dev"
UpperCAmelCase__ : Any = "test"
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : GlueDataTrainingArguments
UpperCAmelCase__ : str
UpperCAmelCase__ : List[InputFeatures]
    def __init__( self, args, tokenizer, limit_length = None, mode = Split.train, cache_dir = None, ) -> List[Any]:
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py', FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start )
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> Dict:
return len(self.features )
    def __getitem__( self, i ) -> InputFeatures:
return self.features[i]
    def get_labels( self ) -> Optional[int]:
return self.label_list
| 40 |
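A minimal usage sketch for the GLUE dataset wrapper above. The class names below are the upstream transformers originals (the sample aliases them), and the checkpoint, task name and data directory are illustrative assumptions, not part of the sample:

from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # hypothetical checkpoint choice
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
# mode may be passed as a string; __init__ converts it through the Split enum
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
print(len(train_dataset), train_dataset.get_labels())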
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : int = KandinskyVaaInpaintPipeline
a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
a : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
a : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : List[Any] = False
@property
    def text_embedder_hidden_size ( self : int ) -> Dict:
return 32
@property
    def time_input_dim ( self : Optional[int] ) -> Optional[Any]:
return 32
@property
    def block_out_channels_a ( self : Optional[Any] ) -> Optional[int]:
return self.time_input_dim
@property
    def time_embed_dim ( self : str ) -> List[str]:
return self.time_input_dim * 4
@property
    def cross_attention_dim ( self : Tuple ) -> List[str]:
return 100
@property
    def dummy_unet ( self : Dict ) -> Any:
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs ( self : int ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components ( self : Any ) -> List[Any]:
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs ( self : str , device : Tuple , seed : List[str]=0 ) -> Optional[Any]:
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:16, :16] = 0
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical ( self : str ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 63 | 0 |
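The pixel-slice assertions in the tests above follow a standard pattern for image pipelines: compare a small corner of the generated image against a reference slice recorded from a known-good run, within an absolute tolerance. A standalone restatement with placeholder data:

import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for pipeline output
expected_slice = np.zeros(9, dtype=np.float32)      # reference values from a trusted run
image_slice = image[0, -3:, -3:, -1].flatten()
assert np.abs(image_slice - expected_slice).max() < 1e-2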
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
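The module above is the standard transformers lazy-import layout: only the import structure is declared eagerly, and _LazyModule swaps itself into sys.modules so the heavy torch/flax submodules load on first attribute access. A sketch of the observable behaviour, assuming the file lives at its usual transformers path:

import importlib

mod = importlib.import_module("transformers.models.gpt_neo")
# Nothing heavy has been imported yet; this attribute access triggers the
# real import of the configuration submodule via _LazyModule.__getattr__:
config_cls = mod.GPTNeoConfig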
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def lowerCamelCase__ ( objs : str=OBJECTS , attrs : Union[str, Any]=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(""",""" )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(""",""" )[0].lower().strip() )
    return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
    r = OrderedDict()
    with open(__lowerCamelCase , """rb""" ) as f:
        ckp = pkl.load(f )["""model"""]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config :
    """simple docstring"""
    _pointer = {}
    def __init__( self : Dict , dictionary : dict , name : str = "root" , level : Any=0 ) -> Dict:
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
    def __setattr__( self : List[str] , key : List[str] , val : Tuple ) -> int:
        self.__dict__[key] = val
        self.__dict__[key.split(""".""" )[-1]] = val
        levels = key.split(""".""" )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , """.""".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
    def UpperCAmelCase ( self : str , data : Optional[int] , file_name : Any ) -> Optional[int]:
        with open(f"""{file_name}""" , """w""" ) as stream:
            dump(data , stream )
    def UpperCAmelCase ( self : List[str] , data : Union[str, Any] , file_name : Union[str, Any] ) -> Any:
        with open(f"""{file_name}""" , """w""" ) as stream:
            json.dump(data , stream )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
            data = load(stream , Loader=Loader )
return data
    def __str__( self : List[str] ) -> Tuple:
        t = """ """
        if self._name != "root":
            r = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = """"""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(v ).__name__})\n"""
        self._level = level
        return r[:-1]
@classmethod
    def UpperCAmelCase ( cls : List[str] , pretrained_model_name_or_path : str , **kwargs : Any ) -> Any:
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
@classmethod
    def UpperCAmelCase ( cls : Dict , pretrained_model_name_or_path : str , **kwargs : Union[str, Any] ) -> Optional[int]:
        cache_dir = kwargs.pop("""cache_dir""" , None )
        force_download = kwargs.pop("""force_download""" , False )
        resume_download = kwargs.pop("""resume_download""" , False )
        proxies = kwargs.pop("""proxies""" , None )
        local_files_only = kwargs.pop("""local_files_only""" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = """Can't load config for"""
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(resolved_config_file ), kwargs
def lowerCamelCase__ ( in_tensor : Dict ):
    out_tensor = torch.load("""dump.pt""" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(na , nb , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def is_remote_url( url : Optional[int] ):
    parsed = urlparse(url )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id : str , filename : str , use_cdn : int=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = """/""" not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""
def http_get( url : Optional[int] , temp_file : List[Any] , proxies : Tuple=None , resume_size : Optional[Any]=0 , user_agent : Optional[int]=None , ):
    ua = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("""{}/{}""".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="""B""" , unit_scale=True , total=total , initial=resume_size , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url : List[Any] , cache_dir : Optional[int]=None , force_download : Optional[int]=False , proxies : Union[str, Any]=None , etag_timeout : str=10 , resume_download : Optional[int]=False , user_agent : Dict=None , local_files_only : List[str]=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + """.lock"""
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + """.incomplete"""
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , """a+b""" ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"""url""": url, """etag""": etag}
        meta_path = cache_path + """.json"""
        with open(meta_path , """w""" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url : Union[str, Any] , etag : List[Any]=None ):
    url_bytes = url.encode("""utf-8""" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(""".h5""" ):
        filename += ".h5"
    return filename
def cached_path( url_or_filename : Any , cache_dir : int=None , force_download : int=False , proxies : Union[str, Any]=None , resume_download : Tuple=False , user_agent : str=None , extract_compressed_file : List[Any]=False , force_extract : Tuple=False , local_files_only : str=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , """r""" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
        return output_path_extracted
    return output_path
def lowerCamelCase__ ( query : List[Any] , delim : int="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split("""\n""" )
        req.close()
    return data
def get_image_from_url( __lowerCamelCase : Any ):
    response = requests.get(__lowerCamelCase )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def lowerCamelCase__ ( url : str ):
    fn = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            ka = k.replace("""running_var""" , """num_batches_tracked""" )
            new[ka] = zero
    return new
def lowerCamelCase__ ( ):
    print(f"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( im : Any , input_format : List[Any]="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__ ( images : Optional[int] , batch : int=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 63 | 0 |
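A detail worth restating from the utilities above: cached downloads are named by hashing the URL and ETag rather than by basename, so different revisions of the same remote file can coexist in the cache. A standalone equivalent of url_to_filename with illustrative inputs:

from hashlib import sha256

url = "https://example.com/pytorch_model.bin"  # illustrative URL
etag = '"0123abc"'                             # illustrative ETag header value
filename = sha256(url.encode("utf-8")).hexdigest()
if etag:
    filename += "." + sha256(etag.encode("utf-8")).hexdigest()
if url.endswith(".h5"):
    filename += ".h5"
print(filename)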
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph ,vert ,visited ) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph ,neighbour ,visited )
    order.append(vert )
    return order
def find_components( reversed_graph ,vert ,visited ) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph ,neighbour ,visited )
    return component
def strongly_connected_components( graph ) -> list[list[int]]:
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph ,i ,visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph ,vert ,visited )
            components_list.append(component )
    return components_list
| 42 |
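With the names repaired above, Kosaraju's two-pass algorithm can be checked directly on the sample graphs: the strongly connected components of test_graph_1 are {0, 1, 2}, {3} and {4}, and those of test_graph_2 are {0, 1, 2} and {3, 4, 5}:

components = strongly_connected_components(test_graph_1)
assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]
components = strongly_connected_components(test_graph_2)
assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3, 4, 5]]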
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Tuple , parent : Union[str, Any] , batch_size : Any=13 , seq_length : Optional[int]=7 , is_training : str=True , use_attention_mask : Optional[Any]=True , use_token_type_ids : int=True , use_labels : int=True , vocab_size : List[str]=99 , hidden_size : int=32 , num_hidden_layers : int=5 , num_attention_heads : Tuple=4 , intermediate_size : str=37 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : List[Any]=16 , type_sequence_label_size : Dict=2 , initializer_range : Union[str, Any]=0.02 , num_choices : Dict=4 , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self : Dict ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self : Tuple ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self : Any ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
a : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self : Dict ) -> Union[str, Any]:
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 63 | 0 |
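The slow test above exercises on-the-fly PyTorch-to-Flax weight conversion; the core call in isolation, using the same public checkpoint as the test (requires flax to be installed):

from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)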
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , number=SCREAMING_SNAKE_CASE , min_len=10_26 , trim=SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
lowercase__ = load_gpta('''gpt2''' ).to(SCREAMING_SNAKE_CASE )
print('''computing perplexity on objective set''' )
lowercase__ = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).item()
print('''perplexity on objective set:''' , SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
# Train secondary learner
    secondary_learner = train_secondary_learner(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_epochs=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , eval_freq=1_00 , igf_model_path=SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
"""simple docstring"""
lowercase__ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
lowercase__ = RandomSampler(SCREAMING_SNAKE_CASE )
lowercase__ = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE )
lowercase__ = max_steps // (len(SCREAMING_SNAKE_CASE )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ , lowercase__ = recopy_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
for epoch in range(int(SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main( ):
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=SCREAMING_SNAKE_CASE , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=SCREAMING_SNAKE_CASE , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=SCREAMING_SNAKE_CASE , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=SCREAMING_SNAKE_CASE , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=SCREAMING_SNAKE_CASE , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=SCREAMING_SNAKE_CASE , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=SCREAMING_SNAKE_CASE , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=SCREAMING_SNAKE_CASE , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=SCREAMING_SNAKE_CASE , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
lowercase__ = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
lowercase__ = training_secondary_learner(
SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE , secondary_learner=SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 43 |
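The batch-filtering rule buried in the training loop above, restated on its own: a context joins the batch only while the secondary learner's predicted information gain clears a threshold, and the threshold is relaxed after a warm-up of 10 global steps. The predicted values below are illustrative:

threshold = 1.0
for global_step, predicted_q in enumerate([1.2, 0.8, 1.4, 0.3], start=9):
    if global_step == 10:
        threshold = -1
    do_backprop = not (predicted_q < threshold)
    print(global_step, predicted_q, do_backprop)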
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Optional[int] = logging.get_logger(__name__)
class SchedulerType( Enum ):
    """simple docstring"""
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule( optimizer : Optimizer , last_epoch : int = -1 ):
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer : Optimizer , num_warmup_steps : int , last_epoch : int = -1 ):
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer : Optimizer , step_rules : str , last_epoch : int = -1 ):
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        steps_str , value_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict : Dict , last_lr_multiple : List[Any] ):
        def rule_func(steps : int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup( optimizer : Optional[Any] , num_warmup_steps : str , num_training_steps : Tuple , last_epoch : List[Any]=-1 ):
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : float = 0.5 , last_epoch : int = -1 ):
    def lr_lambda(current_step : Dict ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : int = 1 , last_epoch : int = -1 ):
    def lr_lambda(current_step : Union[str, Any] ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer : str , num_warmup_steps : List[str] , num_training_steps : Tuple , lr_end : List[Any]=1E-7 , power : List[Any]=1.0 , last_epoch : int=-1 ):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name : Union[str, SchedulerType] , optimizer : Optimizer , step_rules : Optional[str] = None , num_warmup_steps : Optional[int] = None , num_training_steps : Optional[int] = None , num_cycles : int = 1 , power : float = 1.0 , last_epoch : int = -1 , ):
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 63 | 0 |
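A standalone restatement of the warm-up-then-linear-decay multiplier computed by get_linear_schedule_with_warmup above; LambdaLR multiplies the optimizer's base learning rate by this factor at each step:

def linear_multiplier(step, num_warmup_steps=100, num_training_steps=1000):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert linear_multiplier(50) == 0.5    # halfway through warm-up
assert linear_multiplier(1000) == 0.0  # fully decayed at the end of training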
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase__ ( ProcessorMixin ):
lowerCAmelCase_ = 'MCTCTFeatureExtractor'
lowerCAmelCase_ = 'AutoTokenizer'
    def __init__( self : Union[str, Any],feature_extractor : int,tokenizer : Tuple ):
        super().__init__(feature_extractor,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self : Optional[Any],*args : int,**kwargs : Union[str, Any] ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args,**kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio",None )
        sampling_rate = kwargs.pop("sampling_rate",None )
        text = kwargs.pop("text",None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio,*args,sampling_rate=sampling_rate,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self : Optional[Any],*args : Tuple,**kwargs : List[str] ):
        return self.tokenizer.batch_decode(*args,**kwargs )
    def pad( self : Union[str, Any],*args : List[Any],**kwargs : Optional[Any] ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args,**kwargs )
        input_features = kwargs.pop("input_features",None )
        labels = kwargs.pop("labels",None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features,*args,**kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels,**kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
def lowerCamelCase_ ( self : int,*__A : Dict,**__A : List[str] ):
return self.tokenizer.decode(*__A,**__A )
@contextmanager
def lowerCamelCase_ ( self : List[Any] ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Union[str, Any] = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Optional[Any] = False | 44 |
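
# --- Editor's example (sketch): typical usage of a combined speech processor like the
# one above. The checkpoint name is an assumption and downloading it needs network
# access, so treat this as an illustration rather than a runnable test.
import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("speechbrain/m-ctc-t-large")  # assumed checkpoint
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
# Modern usage passes audio and labels in a single call instead of `as_target_processor`:
inputs = processor(audio=audio, text="hello world", sampling_rate=16000)
print(inputs.keys())  # input features from the feature extractor plus labels from the tokenizer
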
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via the recurrence."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 63 | 0 |
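
# --- Editor's example: spot-checking the recurrence above (assumes gamma() is in scope).
from math import isclose, pi, sqrt

assert gamma(5) == 24.0                     # (5 - 1)! for integer arguments
assert isclose(gamma(1.5), 0.5 * sqrt(pi))  # half-integer case: gamma(1.5) = 0.5 * gamma(0.5)
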
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either condition holds, we would be taking the factorial of a negative
    # number, which is undefined.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
) | 45 |
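
# --- Editor's example: the closed form above agrees with the standard library
# (assumes combinations() is in scope; math.comb needs Python 3.8+).
import math

assert combinations(52, 5) == math.comb(52, 5) == 2598960
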
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 0 |
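
# --- Editor's example: zigzag traversal of the sample tree above alternates direction
# per level (assumes make_tree() and zigzag() are in scope).
tree = make_tree()
print(zigzag(tree))  # [[1], [3, 2], [4, 5]]: L-to-R, then R-to-L, then L-to-R
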
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler | 46 |
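
# --- Editor's example (sketch): the guarded-import pattern used above, reduced to a
# standalone form. The scheduler name below is a placeholder, not a real import.
class OptionalDependencyNotAvailable(ImportError):
    """Raised when a soft dependency is missing."""

def is_scipy_available() -> bool:
    try:
        import scipy  # noqa: F401
    except ImportError:
        return False
    return True

try:
    if not is_scipy_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    LMSDiscreteScheduler = None  # stand-in for the dummy-object fallback
else:
    from scipy import integrate  # the real, scipy-dependent import would live here
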
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_type(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 63 | 0 |
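
# --- Editor's example (sketch): the `prefix_text` API exercised by the tests above
# splits one string into a prefix segment and an input segment, with token_type_ids
# marking the prefix. Requires downloading the checkpoint, so this is illustrative.
from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tokenizer("こんばんは、世界。", prefix_text="こんにちは、世界。")
print(enc.input_ids)
print(enc.token_type_ids)  # 1 over the prefix positions, 0 over the input segment
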
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 47 |
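
# --- Editor's example: the score computed in the test above is the negative total
# token log-likelihood. A self-contained version with uniform dummy logits:
import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.zeros((1, 4, 10))  # batch=1, seq_len=4, vocab=10 -> uniform distribution
labels = jnp.array([[1, 2, 3, 4]])
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
score = -(labels.shape[-1] * loss.item())
print(score)  # 4 * log(1/10), roughly -9.21
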
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a : Dict = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Legacy shim: flips deprecated `no_x` flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 63 | 0 |
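
# --- Editor's example: a standalone sketch of the deprecation shim above, which
# flips legacy `no_x` flags into their positive counterparts.
def flip_deprecated(kwargs: dict, deprecated_args: list) -> dict:
    out = dict(kwargs)
    for deprecated_arg in deprecated_args:
        if deprecated_arg in out:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            out[positive_arg] = not out.pop(deprecated_arg)
    return out

print(flip_deprecated({"no_cuda": True}, ["no_cuda"]))  # {'cuda': False}
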
'''simple docstring'''
def solution() -> int:
    """Product of the sampled digits d1, d10, ..., d1000000 of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 48 |
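
# --- Editor's example: the first characters of Champernowne's constant, showing why
# constant[9] (the 10th digit) is '1' -- it is the leading digit of "10".
digits = "".join(str(i) for i in range(1, 12))
print(digits)     # 1234567891011
print(digits[9])  # '1'
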
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 0 |
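
# --- Editor's example: a dependency-free round trip mirroring the iter_sql_file
# helper above, using an in-memory SQLite database.
import contextlib
import sqlite3

with contextlib.closing(sqlite3.connect(":memory:")) as con:
    con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
    con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("a", 1, 1.0), ("b", 2, 2.0)])
    rows = list(con.execute("SELECT * FROM dataset"))
print(rows)  # [('a', 1, 1.0), ('b', 2, 2.0)]
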
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 49 |
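
# --- Editor's example: composing the warpers tested above outside a test case; the
# call signature follows the tests, processor(input_ids, scores, cur_len=...).
import jax.numpy as jnp
from transformers.generation import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
processor = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(2)])
warped = processor(None, scores, cur_len=None)
print(warped)  # the two largest logits survive; the rest become the filter value (-inf)
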
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 63 | 0 |
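
# --- Editor's example: 197 is a circular prime because every digit rotation is prime.
n = "197"
print([int(n[j:] + n[:j]) for j in range(len(n))])  # [197, 971, 719] -- all prime
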
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 50 |
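
# --- Editor's example: the inputs_to_logits_ratio property above is just the product
# of the convolutional strides, i.e. how many raw audio samples map to one frame.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320
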
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
a : Optional[int] = ["START"]
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
__UpperCAmelCase : str = torch.load(__lowerCamelCase , map_location="""cpu""" )
__UpperCAmelCase : Tuple = model["""model"""]
__UpperCAmelCase : int = BlenderbotConfig.from_json_file(__lowerCamelCase )
__UpperCAmelCase : List[str] = BlenderbotForConditionalGeneration(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(__lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCamelCase )
m.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
m.half()
m.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 0 |
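
# --- Editor's example: the PATTERNS table above is applied as plain substring
# substitutions; tracing one ParlAI key through the rename logic by hand:
key = "encoder.attention.q_lin.weight"
for parlai_name, hf_name in [["attention", "attn"], ["q_lin", "q_proj"]]:
    key = key.replace(parlai_name, hf_name)
key = key.replace(".attn", ".self_attn")  # encoder-specific fixup
print(key)  # encoder.self_attn.q_proj.weight
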
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 51 |
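
# --- Editor's example: Atbash is an involution, so applying it twice returns the
# input (assumes atbash() above is in scope).
assert atbash("Hello") == "Svool"
assert atbash(atbash("with space")) == "with space"
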
def interpolation_search(sorted_collection, item):
    """Searches `item` in `sorted_collection` by estimating its index from the values."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:  # re-probe inside the narrowed interval
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 63 | 0 |
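
# --- Editor's example: on uniformly spaced data the first probe lands on the target:
# point = 0 + (30 - 10) * (4 - 0) // (50 - 10) = 2 (assumes interpolation_search() is in scope).
print(interpolation_search([10, 20, 30, 40, 50], 30))  # 2
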
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
return plaintext | 52 |
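
# --- Editor's example: a round trip through the cipher above (assumes encode() and
# decode() are in scope). Note that decoding returns the *prepared* plaintext --
# upper-cased, letters only, padded to even length with X.
key = "playfair example"  # illustrative key
ct = encode("hide the gold", key)
print(ct, decode(ct, key))  # decode(ct, key) == 'HIDETHEGOLDX'
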
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 53 |
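
# --- Editor's example: wraparound behaviour of the fixed-capacity queue above
# (assumes the class, renamed CircularQueue here, is in scope).
q = CircularQueue(2)
q.enqueue("a").enqueue("b")
print(q.dequeue())               # 'a'
q.enqueue("c")                   # rear wraps around to index 0
print(q.dequeue(), q.dequeue())  # 'b' 'c'
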
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 0 |
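The four branches on `len(self.ets)` in the scheduler's `step` above are the explicit Adams-Bashforth multistep rules of orders 1 through 4 (the `(1 / 24) * (55 ... - 9 ...)` line is the fourth-order rule). A small sketch that restates the coefficients and verifies that each rule reproduces a constant signal; the helper names are illustrative, not taken from the scheduler:

AB_COEFFS = {
    1: [1.0],
    2: [3 / 2, -1 / 2],
    3: [23 / 12, -16 / 12, 5 / 12],
    4: [55 / 24, -59 / 24, 37 / 24, -9 / 24],
}

def ab_combine(ets, order):
    """Weight the most recent `order` model outputs, newest first."""
    recent = ets[::-1][:order]  # ets[-1] is the newest entry
    return sum(c * e for c, e in zip(AB_COEFFS[order], recent))

for order, coeffs in AB_COEFFS.items():
    # Each rule is exact on a constant signal: the coefficients sum to 1.
    assert abs(sum(coeffs) - 1.0) < 1e-12
    assert abs(ab_combine([2.0, 2.0, 2.0, 2.0], order) - 2.0) < 1e-12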
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__lowercase : Tuple =random.Random()
def a__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase_ =global_rng
UpperCAmelCase_ =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A ( unittest.TestCase ):
def __init__( self: str , _lowerCAmelCase: str , _lowerCAmelCase: Tuple=7 , _lowerCAmelCase: Dict=400 , _lowerCAmelCase: List[Any]=2000 , _lowerCAmelCase: Any=2048 , _lowerCAmelCase: str=128 , _lowerCAmelCase: List[str]=1 , _lowerCAmelCase: Any=512 , _lowerCAmelCase: Union[str, Any]=30 , _lowerCAmelCase: Any=4_4100 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =min_seq_length
UpperCAmelCase_ =max_seq_length
UpperCAmelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ =spectrogram_length
UpperCAmelCase_ =feature_size
UpperCAmelCase_ =num_audio_channels
UpperCAmelCase_ =hop_length
UpperCAmelCase_ =chunk_length
UpperCAmelCase_ =sampling_rate
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: str=False , _lowerCAmelCase: Optional[int]=False ) -> Any:
'''simple docstring'''
def _flatten(_lowerCAmelCase: Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
UpperCAmelCase_ =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( __lowercase , unittest.TestCase ):
_snake_case =TvltFeatureExtractor
def lowerCAmelCase__ ( self: int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =TvltFeatureExtractionTester(self )
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "hop_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "chunk_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "sampling_rate" ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ =feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
UpperCAmelCase_ =self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =feat_extract_first.to_dict()
UpperCAmelCase_ =feat_extract_second.to_dict()
UpperCAmelCase_ =dict_first.pop("mel_filters" )
UpperCAmelCase_ =dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ =os.path.join(_lowerCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_lowerCAmelCase )
UpperCAmelCase_ =self.feature_extraction_class.from_json_file(_lowerCAmelCase )
UpperCAmelCase_ =feat_extract_first.to_dict()
UpperCAmelCase_ =feat_extract_second.to_dict()
UpperCAmelCase_ =dict_first.pop("mel_filters" )
UpperCAmelCase_ =dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ =feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCAmelCase_ =feature_extractor(
_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 , mask_audio=_lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ =np.asarray(_lowerCAmelCase )
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase_ =ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self._load_datasamples(1 )
UpperCAmelCase_ =TvltFeatureExtractor()
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
UpperCAmelCase_ =torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _lowerCAmelCase , atol=1e-4 ) )
| 54 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 63 | 0 |
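The CLI entry point above relies on each command class registering itself with an argparse subparser and stashing a factory under `func`. A self-contained sketch of that dispatch pattern; `GreetCommand` is an invented example, not a real transformers command:

from argparse import ArgumentParser, Namespace

class GreetCommand:
    @staticmethod
    def register_subcommand(subparsers) -> None:
        parser = subparsers.add_parser("greet", help="print a greeting")
        parser.add_argument("--name", default="world")
        # set_defaults(func=...) is what lets the dispatcher below find us.
        parser.set_defaults(func=lambda args: GreetCommand(args))

    def __init__(self, args: Namespace) -> None:
        self._name = args.name

    def run(self) -> None:
        print(f"hello, {self._name}")

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
GreetCommand.register_subcommand(subparsers)

args = parser.parse_args(["greet", "--name", "reader"])
if not hasattr(args, "func"):  # no subcommand given: mirror the help-and-exit branch
    parser.print_help()
else:
    args.func(args).run()  # prints: hello, reader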
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :str = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "efficientnet"
def __init__( self : Optional[int] ,A : int = 3 ,A : int = 6_00 ,A : float = 2.0 ,A : float = 3.1 ,A : int = 8 ,A : List[int] = [3, 3, 5, 3, 5, 5, 3] ,A : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] ,A : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] ,A : List[int] = [] ,A : List[int] = [1, 2, 2, 2, 1, 2, 1] ,A : List[int] = [1, 2, 2, 3, 3, 4, 1] ,A : List[int] = [1, 6, 6, 6, 6, 6, 6] ,A : float = 0.25 ,A : str = "swish" ,A : int = 25_60 ,A : str = "mean" ,A : float = 0.02 ,A : float = 0.0_01 ,A : float = 0.99 ,A : float = 0.5 ,A : float = 0.2 ,**A : Any ,):
super().__init__(**A )
__A = num_channels
__A = image_size
__A = width_coefficient
__A = depth_coefficient
__A = depth_divisor
__A = kernel_sizes
__A = in_channels
__A = out_channels
__A = depthwise_padding
__A = strides
__A = num_block_repeats
__A = expand_ratios
__A = squeeze_expansion_ratio
__A = hidden_act
__A = hidden_dim
__A = pooling_type
__A = initializer_range
__A = batch_norm_eps
__A = batch_norm_momentum
__A = dropout_rate
__A = drop_connect_rate
__A = sum(A ) * 4
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = version.parse("1.11" )
@property
def UpperCamelCase_ ( self : Any ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 1E-5
| 55 |
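The `width_coefficient`, `depth_coefficient`, and `depth_divisor` fields in the EfficientNet config above drive the paper's compound scaling. A sketch of the usual rounding rules, written from the EfficientNet paper rather than from this configuration file, so the exact rounding behavior here is an assumption:

import math

def round_filters(channels: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    """Scale a channel count by the width coefficient, snapped to the divisor."""
    scaled = channels * width_coefficient
    new_channels = max(
        depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    )
    if new_channels < 0.9 * scaled:  # never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)

def round_repeats(repeats: int, depth_coefficient: float = 3.1) -> int:
    """Scale block repeats by the depth coefficient, rounding up."""
    return int(math.ceil(depth_coefficient * repeats))

assert round_filters(32) == 64
assert round_repeats(3) == 10  # ceil(3.1 * 3) = ceil(9.3)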
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 0 |
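Each try/except block in the XGLM row follows one pattern: probe for an optional backend and register symbols with the lazy module only when the probe succeeds, so a missing dependency fails at attribute access instead of at import. A self-contained sketch of that pattern, with a hypothetical backend name:

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    """Sentinel raised when an optional backend is missing."""

def is_backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

_import_structure = {"configuration_demo": ["DemoConfig"]}
try:
    if not is_backend_available("some_optional_backend"):  # hypothetical package
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # the symbols are simply never registered, so importing them fails lazily
else:
    _import_structure["modeling_demo"] = ["DemoModel"]

print(sorted(_import_structure))  # only 'configuration_demo' without the backend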
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _a () -> Any:
"""simple docstring"""
__snake_case = HfArgumentParser(lowercase__ )
__snake_case = parser.parse_args_into_dataclasses()[0]
__snake_case = TensorFlowBenchmark(args=lowercase__ )
try:
__snake_case = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__snake_case = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
__snake_case = ' '.join(str(lowercase__ ).split(' ' )[:-1] )
__snake_case = ''
__snake_case = eval(str(lowercase__ ).split(' ' )[-1] )
__snake_case = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
__snake_case = full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 56 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ):
torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _a ( self ):
UpperCamelCase_: List[Any] = self.dummy_uncond_unet
UpperCamelCase_: Union[str, Any] = KarrasVeScheduler()
UpperCamelCase_: List[str] = KarrasVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_: int = torch.manual_seed(0 )
UpperCamelCase_: Dict = pipe(num_inference_steps=2 , generator=_lowerCamelCase , output_type='numpy' ).images
UpperCamelCase_: Tuple = torch.manual_seed(0 )
UpperCamelCase_: int = pipe(num_inference_steps=2 , generator=_lowerCamelCase , output_type='numpy' , return_dict=_lowerCamelCase )[0]
UpperCamelCase_: str = image[0, -3:, -3:, -1]
UpperCamelCase_: List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase_: Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: List[str] = 'google/ncsnpp-celebahq-256'
UpperCamelCase_: str = UNetaDModel.from_pretrained(_lowerCamelCase )
UpperCamelCase_: Any = KarrasVeScheduler()
UpperCamelCase_: Union[str, Any] = KarrasVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_: List[Any] = torch.manual_seed(0 )
UpperCamelCase_: Any = pipe(num_inference_steps=2_0 , generator=_lowerCamelCase , output_type='numpy' ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
UpperCamelCase_: Any = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 57 |
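The KarrasVe test above runs the pipeline twice, once asking for a structured output and once with `return_dict` disabled followed by `[0]`, then checks the two image slices agree. A tiny sketch of the convention being tested; `PipelineOutput` and `run_pipeline` are illustrative stand-ins:

from dataclasses import dataclass

@dataclass
class PipelineOutput:
    images: list

def run_pipeline(return_dict: bool = True):
    images = [[0.0, 1.0, 0.0]]  # stand-in for generated pixels
    if not return_dict:
        return (images,)  # plain tuple, indexed positionally
    return PipelineOutput(images=images)

# The test asserts both paths agree, which this mirrors:
assert run_pipeline().images == run_pipeline(return_dict=False)[0]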
def lowerCamelCase__ ( __lowerCamelCase : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__UpperCAmelCase : int = [True] * (num + 1)
__UpperCAmelCase : Tuple = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCamelCase ):
__UpperCAmelCase : str = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 0 |
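The sieve in this row was also damaged by the identifier rewrite: besides the erased assignment inside the inner loop, the `range` strides by the function argument (`__lowerCamelCase`, i.e. `num`) where the original stride must have been the prime `p`. A corrected, runnable restatement:

# Corrected restatement of the sieve above; the stride of the inner range is
# the prime p, not the upper bound num as the rewritten text suggests.
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):  # mark every multiple of p
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]

assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(2) == [2]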
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=1_6 , _lowercase=3_6 , _lowercase=6 , _lowercase=6 , _lowercase=6 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> int:
'''simple docstring'''
snake_case_ : Dict = parent
snake_case_ : Dict = batch_size
snake_case_ : str = seq_length
snake_case_ : List[str] = is_training
snake_case_ : Tuple = use_input_mask
snake_case_ : Dict = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[Any] = vocab_size
snake_case_ : List[Any] = embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Any = num_hidden_groups
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Tuple = hidden_act
snake_case_ : str = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Dict = num_labels
snake_case_ : Dict = num_choices
snake_case_ : Tuple = scope
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_input_mask:
snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : List[Any] = None
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = AlbertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
snake_case_ : List[str] = model(_lowercase , token_type_ids=_lowercase )
snake_case_ : Dict = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = AlbertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AlbertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : List[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = AlbertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.num_labels
snake_case_ : Any = AlbertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Dict = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.num_labels
snake_case_ : List[str] = AlbertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.num_choices
snake_case_ : Any = AlbertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Union[str, Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
snake_case_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = AlbertModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=_lowercase , hidden_size=3_7 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : List[Any] = type
self.model_tester.create_and_check_model(*_lowercase )
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = AlbertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = AlbertModel.from_pretrained("""albert-base-v2""" )
snake_case_ : str = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case_ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ : Optional[int] = model(_lowercase , attention_mask=_lowercase )[0]
snake_case_ : int = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _lowercase )
snake_case_ : Any = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 58 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[int] = 'git_vision_model'
def __init__( self : str , __lowercase : List[str]=768 , __lowercase : List[str]=3072 , __lowercase : List[Any]=12 , __lowercase : Dict=12 , __lowercase : int=3 , __lowercase : Any=224 , __lowercase : Optional[int]=16 , __lowercase : Dict="quick_gelu" , __lowercase : Any=1e-5 , __lowercase : str=0.0 , __lowercase : int=0.02 , **__lowercase : int , ) -> List[str]:
super().__init__(**__lowercase )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : str = patch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : Union[str, os.PathLike] , **__lowercase : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__UpperCAmelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'git'
def __init__( self : Optional[int] , __lowercase : List[Any]=None , __lowercase : Tuple=30522 , __lowercase : str=768 , __lowercase : Optional[int]=6 , __lowercase : Union[str, Any]=12 , __lowercase : Optional[int]=3072 , __lowercase : List[str]="gelu" , __lowercase : Tuple=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=1024 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[Any]=1e-1_2 , __lowercase : List[Any]=0 , __lowercase : Dict="absolute" , __lowercase : Dict=True , __lowercase : Any=False , __lowercase : Optional[int]=101 , __lowercase : str=102 , __lowercase : Union[str, Any]=None , **__lowercase : Dict , ) -> Tuple:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , pad_token_id=__lowercase , **__lowercase )
if vision_config is None:
__UpperCAmelCase : Optional[int] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__UpperCAmelCase : Tuple = GitVisionConfig(**__lowercase )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = position_embedding_type
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : int = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Optional[int] = bos_token_id
__UpperCAmelCase : List[Any] = eos_token_id
def UpperCAmelCase ( self : str ) -> int:
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : List[str] = self.vision_config.to_dict()
__UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 63 | 0 |
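The Git config row above shows a composite configuration that owns a vision sub-config, falls back to defaults when none is supplied, and re-serializes the sub-config inside `to_dict()`. A hedged sketch of that pattern; the class names here are invented for illustration:

from typing import Optional

class DemoVisionConfig:
    def __init__(self, hidden_size: int = 768, **kwargs) -> None:
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return {"hidden_size": self.hidden_size}

class DemoCompositeConfig:
    model_type = "demo"

    def __init__(self, vision_config: Optional[dict] = None, vocab_size: int = 30522) -> None:
        if vision_config is None:
            vision_config = {}  # mirrors the "initializing ... with default values" branch
        self.vision_config = DemoVisionConfig(**vision_config)
        self.vocab_size = vocab_size

    def to_dict(self) -> dict:
        output = dict(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()  # flatten the sub-config
        output["model_type"] = self.model_type
        return output

config = DemoCompositeConfig(vision_config={"hidden_size": 512})
assert config.to_dict()["vision_config"]["hidden_size"] == 512
assert DemoCompositeConfig().to_dict()["vision_config"]["hidden_size"] == 768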
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__A = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
| 59 |
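The deprecated feature extractor above is a rename-by-subclassing: the old class emits a warning in `__init__`, then defers entirely to the new implementation. A sketch of that pattern with invented names:

import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed in a future"
            " release. Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OldFeatureExtractor(size=128)
assert extractor.size == 128          # behavior is fully inherited
assert caught[0].category is FutureWarning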
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 0 |