"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Tuple = ['''vqvae''']
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ,mel=SCREAMING_SNAKE_CASE__ ,vqvae=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ ) else 10_00
@torch.no_grad()
def __call__( self ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__=True ,) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__SCREAMING_SNAKE_CASE :List[str] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__SCREAMING_SNAKE_CASE :Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=SCREAMING_SNAKE_CASE__ ,device=self.device ,)
__SCREAMING_SNAKE_CASE :Optional[Any] = noise
__SCREAMING_SNAKE_CASE :Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = np.frombuffer(input_image.tobytes() ,dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__SCREAMING_SNAKE_CASE :List[str] = (input_image / 2_55) * 2 - 1
__SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__SCREAMING_SNAKE_CASE :Dict = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE__ ,0 ) ).latent_dist.sample(
generator=SCREAMING_SNAKE_CASE__ )[0]
__SCREAMING_SNAKE_CASE :int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__SCREAMING_SNAKE_CASE :Optional[int] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.scheduler.timesteps[start_step - 1] )
__SCREAMING_SNAKE_CASE :int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__SCREAMING_SNAKE_CASE :str = int(mask_start_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE :Tuple = int(mask_end_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE :Tuple = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Any = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
else:
__SCREAMING_SNAKE_CASE :Any = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
if isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :List[Any] = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,sample=SCREAMING_SNAKE_CASE__ ,eta=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,)['''prev_sample''']
else:
__SCREAMING_SNAKE_CASE :Tuple = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,sample=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
__SCREAMING_SNAKE_CASE :str = mask[:, step, :, :mask_start]
if mask_end > 0:
__SCREAMING_SNAKE_CASE :Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__SCREAMING_SNAKE_CASE :Any = 1 / self.vqvae.config.scaling_factor * images
__SCREAMING_SNAKE_CASE :Dict = self.vqvae.decode(SCREAMING_SNAKE_CASE__ )['''sample''']
__SCREAMING_SNAKE_CASE :int = (images / 2 + 0.5).clamp(0 ,1 )
__SCREAMING_SNAKE_CASE :int = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
__SCREAMING_SNAKE_CASE :Dict = (images * 2_55).round().astype('''uint8''' )
__SCREAMING_SNAKE_CASE :Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(SCREAMING_SNAKE_CASE__ ,mode='''RGB''' ).convert('''L''' ) for _ in images) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = [self.mel.image_to_audio(SCREAMING_SNAKE_CASE__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(SCREAMING_SNAKE_CASE__ )[:, np.newaxis, :] ) ,**ImagePipelineOutput(SCREAMING_SNAKE_CASE__ ) )
@torch.no_grad()
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__SCREAMING_SNAKE_CASE :Optional[Any] = (sample / 2_55) * 2 - 1
__SCREAMING_SNAKE_CASE :List[Any] = torch.Tensor(SCREAMING_SNAKE_CASE__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
__SCREAMING_SNAKE_CASE :str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.scheduler.alphas_cumprod[t]
__SCREAMING_SNAKE_CASE :Tuple = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE :Dict = 1 - alpha_prod_t
__SCREAMING_SNAKE_CASE :Dict = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
__SCREAMING_SNAKE_CASE :List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__SCREAMING_SNAKE_CASE :int = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__SCREAMING_SNAKE_CASE :Tuple = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> torch.Tensor:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = acos(torch.dot(torch.flatten(SCREAMING_SNAKE_CASE__ ) ,torch.flatten(SCREAMING_SNAKE_CASE__ ) ) / torch.norm(SCREAMING_SNAKE_CASE__ ) / torch.norm(SCREAMING_SNAKE_CASE__ ) )
return sin((1 - alpha) * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ ) + sin(alpha * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ ) | 191 |
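# A minimal usage sketch for the pipeline above, assuming an audio-diffusion
# checkpoint such as "teticio/audio-diffusion-256" that bundles the unet, mel
# and scheduler components (the checkpoint name and save path are illustrative
# assumptions):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   images, (sample_rate, audios) = pipe(batch_size=1, steps=50, return_dict=False)
#   images[0].save("spectrogram.png")  # PIL mel-spectrogram image
#   # audios[0] is the generated waveform as a numpy array at sample_rate Hz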
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Any = '''xmod'''
def __init__( self ,SCREAMING_SNAKE_CASE__=3_05_22 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__="absolute" ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=("en_XX",) ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = intermediate_size
__SCREAMING_SNAKE_CASE :Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE :Optional[Any] = position_embedding_type
__SCREAMING_SNAKE_CASE :Any = use_cache
__SCREAMING_SNAKE_CASE :List[str] = classifier_dropout
__SCREAMING_SNAKE_CASE :Any = pre_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reduction_factor
__SCREAMING_SNAKE_CASE :Dict = adapter_layer_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reuse_layer_norm
__SCREAMING_SNAKE_CASE :Tuple = ln_before_adapter
__SCREAMING_SNAKE_CASE :Any = list(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = default_language
class _SCREAMING_SNAKE_CASE( A ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 191 | 1 |
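# A minimal usage sketch: a tiny X-MOD configuration and model, routed through
# the English adapters. `XmodModel` and `set_default_language` are part of
# transformers' X-MOD implementation; the small sizes below are illustrative.
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
#   model = XmodModel(config)
#   model.set_default_language("en_XX")  # choose which per-language adapters to use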
"""Testing suite for the PyTorch ESMFold model."""
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        # positions: (structure-module blocks, batch, seq_len, atom14 atoms, xyz);
        # angles: (structure-module blocks, batch, seq_len, 7 torsion angles, sin/cos)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
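# A short inference sketch matching the integration test above. Treat it as an
# illustrative sketch: it follows transformers' documented ESMFold usage, and
# the protein sequence here is an arbitrary example.
#
#   from transformers import AutoTokenizer, EsmForProteinFolding
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#   model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
#   inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
#   outputs = model(**inputs)  # outputs.positions has shape (blocks, batch, seq_len, 14, 3)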
"""Testing suite for the TensorFlow LayoutLM model."""
from __future__ import annotations

import unittest

import numpy as np

from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )


class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
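# LayoutLM expects each token's bounding box as (x0, y0, x1, y1) normalized to a
# 0-1000 grid, which is why the tester above draws boxes with `range_bbox=1000`
# and the integration inputs use values like [423, 237, 440, 251]. A minimal
# sketch of the standard preprocessing helper for scaling pixel coordinates into
# that grid (the helper itself is not part of this test file):
def normalize_box(box, width, height):
    # `box` is (x0, y0, x1, y1) in pixels on a page of size `width` x `height`
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]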
"""
Calculate the equated monthly installment (EMI) on a loan, given the principal
borrowed, the rate of interest per annum and the number of years to repay.
"""


def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of monthly payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
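# A worked example of the amortization formula A = p*r*(1+r)^n / ((1+r)^n - 1):
# borrowing 25,000 at 12% per annum over 3 years gives a monthly rate r = 0.01
# and n = 36 payments, so the monthly installment is about 830.36:
#
#   >>> equated_monthly_installments(25000, 0.12, 3)
#   830.3577453212793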
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI: `schedule` events on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword expected by the helper in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run and return their file contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
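# A minimal sketch of pulling the latest daily-CI reports. The artifact name
# below is an illustrative assumption; GITHUB_TOKEN must grant read access to
# the repository's Actions artifacts, and `output_dir` should already exist.
if __name__ == "__main__":
    token = os.environ.get("GITHUB_TOKEN")
    reports = get_last_daily_ci_reports(artifact_names=["test_results"], output_dir="daily_ci", token=token)
    print(list(reports.keys()))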
"""Save a randomly initialized (untrained) seq2seq model built from a pretrained config."""
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a model with random weights from `config_name` (optionally overriding config
    fields via `config_kwargs`) and save it, together with the tokenizer, to `save_dir`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
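# With python-fire, the two positional CLI arguments map to `config_name` and
# `save_dir`, and extra `--flag=value` pairs land in `config_kwargs`. A
# hypothetical invocation (script name and overrides are illustrative):
#
#   python save_randomly_initialized_model.py t5-small ./tiny-t5 --d_model=64 --num_layers=2
#
# which is equivalent to calling:
#
#   save_randomly_initialized_version("t5-small", "./tiny-t5", d_model=64, num_layers=2)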
"""Sigmoid and the sigmoid-based approximation of the GELU activation function."""
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """x * sigmoid(1.702 * x): the sigmoid approximation of GELU."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
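# Quick sanity check (values rounded): sigmoid maps 0 to 0.5 and saturates
# toward 0 and 1, while x * sigmoid(1.702 * x) closely tracks GELU — 1.702 is
# the standard constant in the sigmoid approximation of the Gaussian error
# linear unit.
#
#   >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#   array([0.26894142, 0.5       , 0.73105858])
#   >>> gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0]))
#   array([-0.1542042,  0.       ,  0.8457958])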
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __snake_case ( _lowercase):
snake_case__ : str = "xlm-roberta-xl"
def __init__( self : Optional[int] , __lowerCAmelCase : int=2_5_0_8_8_0 , __lowerCAmelCase : Union[str, Any]=2_5_6_0 , __lowerCAmelCase : Tuple=3_6 , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : int=1_0_2_4_0 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=5_1_4 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : str=1E-05 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Dict="absolute" , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : int = type_vocab_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = position_embedding_type
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : Union[str, Any] = classifier_dropout
class __snake_case ( _lowercase):
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCamelCase : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCamelCase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72 |
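# The ONNX config above declares the batch and sequence dimensions (plus a
# choice dimension for multiple-choice tasks) as dynamic axes, so one exported
# graph serves any batch size or sequence length. For the default task,
# `inputs` resolves to:
#
#   OrderedDict([
#       ("input_ids", {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#   ])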
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
snake_case__ : Optional[int] = object()
# For specifying empty leaf dict `{}`
snake_case__ : Union[str, Any] = object()
def _a ( lowerCamelCase: List[Any] , lowerCamelCase: List[str] ) -> Optional[int]:
'''simple docstring'''
__A = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(lowerCAmelCase__ ) - len(lowerCAmelCase__ ) + 1 ):
__A = [x.match(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , ks[i:] )]
if matches and all(lowerCAmelCase__ ):
return True
return False
def _a ( lowerCamelCase: List[Any] ) -> Any:
'''simple docstring'''
def replace(lowerCamelCase: Union[str, Any] , lowerCamelCase: Any ):
for rule, replacement in rules:
if _match(lowerCAmelCase__ , lowerCAmelCase__ ):
return replacement
return val
return replace
def _a ( ) -> List[Any]:
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , lowerCAmelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , lowerCAmelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _a ( lowerCamelCase: Dict ) -> Optional[int]:
'''simple docstring'''
__A = _get_partition_rules()
__A = _replacement_rules(lowerCAmelCase__ )
__A = {k: _unmatched for k in flatten_dict(lowerCAmelCase__ )}
__A = {k: replace(lowerCAmelCase__ , lowerCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCAmelCase__ ) )
| 361 |
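# How the matching works: each rule is a window of regexes matched against the
# tuple-of-strings key of a flattened parameter tree, so rules can anchor on a
# suffix of the path. For example, with the definitions above:
#
#   _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  # True
#   _match((r"ln_\d+", "bias"), ("transformer", "h", "3", "ln_1", "bias"))                 # True
#   _match(("attention", "out_proj", "kernel"), ("mlp", "c_proj", "kernel"))               # False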
"""Tokenization classes for CPMAnt."""
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first segmentation of `token` against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the window from the right until a vocab entry is found
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by greedy wordpiece matching."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-control ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend the BOS token to each sequence."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
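# The WordpieceTokenizer above performs greedy longest-match-first segmentation:
# it repeatedly takes the longest remaining prefix found in the vocab, and emits
# the unknown token for a single character it cannot match. A toy walkthrough
# with an illustrative vocabulary:
if __name__ == "__main__":
    toy_vocab = {"un": 0, "happy": 1, "ness": 2, "<unk>": 3}
    toy_tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
    print(toy_tokenizer.tokenize("unhappyness"))  # ['un', 'happy', 'ness']
    print(toy_tokenizer.tokenize("joy"))          # ['<unk>', '<unk>', '<unk>']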
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase_ = logging.getLogger(__name__)
def load_and_quantize_model( model : torch.nn.Module , bnb_quantization_config : BnbQuantizationConfig , weights_location : Union[str, os.PathLike] = None , device_map : Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes : Optional[List[str]] = None , max_memory : Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder : Optional[Union[str, os.PathLike]] = None , offload_state_dict : bool = False , ) -> torch.nn.Module:
    '''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert params to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f'The model device type is {model_device.type}. However, cuda is needed for quantization. '
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location}' )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
    return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
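# Usage sketch (added for illustration; mirrors how `accelerate` documents this utility —
# the checkpoint name and weights path below are hypothetical placeholders):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#   from transformers import AutoConfig, AutoModelForCausalLM
#
#   with init_empty_weights():
#       empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   quantized_model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#   )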
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Dict[str, Union[int, str, torch.device]]:
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            } )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == 'balanced_low_0'), max_memory=max_memory, **kwargs, )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized modules on the CPU
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                        these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                        `load_and_quantize_model`. Check\n                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                        for more details.\n                        ' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> torch.nn.Module:
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Tuple[torch.nn.Module, bool]:
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
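# Quick sanity sketch for the recursive replacement above (illustrative only; assumes a
# CUDA-capable `bitsandbytes` install):
#
#   import torch.nn as nn
#   toy = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
#   cfg = BnbQuantizationConfig(load_in_8bit=True)
#   toy, replaced = _replace_with_bnb_layers(toy, cfg, modules_to_not_convert=[])
#   assert replaced  # both nn.Linear layers are now bnb.nn.Linear8bitLt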
def get_keys_to_not_convert( model ) -> List[str]:
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
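# Illustrative expectation (commented; `model` is a placeholder): for a causal LM whose
# output head is tied to the input embeddings, this typically returns ["lm_head"], so the
# head is skipped during quantization and kept in its original precision.
#
#   get_keys_to_not_convert(model)  # e.g. ["lm_head"]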
def has_4bit_bnb_layers( model ) -> bool:
    '''simple docstring'''
    # Check whether the model contains any `bnb.nn.Linear4bit` layer
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device( parameter : nn.Module ) -> torch.device:
    '''simple docstring'''
    return next(parameter.parameters()).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> None:
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f'{module} has no attribute {split}.')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
| 7 |
class Things:
    """simple docstring"""
    def __init__( self, name, value, weight ) -> None:
        '''simple docstring'''
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        '''simple docstring'''
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value( self ):
        '''simple docstring'''
        return self.value
    def get_name( self ):
        '''simple docstring'''
        return self.name
    def get_weight( self ):
        '''simple docstring'''
        return self.weight
    def value_weight( self ):
        '''simple docstring'''
        return self.value / self.weight
def build_menu( name, value, weight ):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy( item, max_cost, key_func ):
    '''simple docstring'''
    # sort items in descending order of the chosen key (e.g. value, or value per weight)
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
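# A minimal sketch of how the helpers above compose (the sample menu is made-up data):
#
#   food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#   value = [80, 100, 60, 70]
#   weight = [40, 10, 20, 70]
#   foods = build_menu(food, value, weight)
#   # pick items greedily by value until the 60-unit weight budget is exhausted:
#   items, total_value = greedy(foods, 60, Things.get_value)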
def test_greedy() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
"""simple docstring"""
def xnor_gate( input_a: int , input_b: int ) -> int:
    # XNOR outputs 1 only when both inputs are equal
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
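    # XNOR is the complement of XOR, so for single-bit inputs this identity holds
    # (added as a quick self-check):
    assert all(xnor_gate(a, b) == 1 - (a ^ b) for a in (0, 1) for b in (0, 1))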
| 350 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
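# Example of the lazy path this sets up (the import below only pays the sentencepiece
# cost when `NllbTokenizer` is first accessed through the `_LazyModule` proxy):
#
#   from transformers.models.nllb import NllbTokenizer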
| 238 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
        return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    '''simple docstring'''
    @slow
    def test_inference_protein_folding( self ):
        """simple docstring"""
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
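# Usage sketch outside the test harness (a hedged example, assuming the standard
# `transformers` ESMFold API; the protein sequence is made up):
#
#   from transformers import AutoTokenizer, EsmForProteinFolding
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#   model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
#   inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
#   outputs = model(**inputs)  # outputs["positions"] holds the predicted atom coordinates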
| 347 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    '''simple docstring'''
    @slow
    @require_torch
    def test_finetune_bert2bert( self ):
        """simple docstring"""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
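        # After training, generation could be sanity-checked like this (illustrative,
        # not part of the original test):
        #
        #   sample = tokenizer("some article text", return_tensors="pt").input_ids
        #   summary_ids = bert2bert.generate(sample, max_length=32)
        #   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))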
| 347 | 1 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
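    # Worked examples (added for illustration): 'kilometer' -> 'km' has exponent 3 and
    # 'meter' -> 'm' has exponent 0, so the conversion factor is 10**3.
    assert length_conversion(4, "kilometer", "meter") == 4000
    assert length_conversion(1, "meter", "kilometer") == 0.001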
| 367 |
"""simple docstring"""
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        raise TypeError(f"a bytes-like object is required, not '{data.__class__.__name__}'")
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
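    # Quick roundtrip self-check against the standard library (an addition, not part of
    # the original doctests):
    import base64 as stdlib_base64
    assert base64_encode(b"hello") == stdlib_base64.b64encode(b"hello")
    assert base64_decode(stdlib_base64.b64encode(b"hello").decode()) == b"hello"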
| 320 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.''')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f'''Loading tokenizer classes: {tokenizer_names}''')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')
        for checkpoint in checkpoint_names:
            logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'''=> File names {file_names}''')
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f'''=> removing {file_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
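# Example invocation (illustrative; the dump path is a placeholder):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers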
| 346 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args, **kwargs)
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images, **kwargs)
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 346 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 370 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'A configuration of type {self.model_type} cannot be instantiated because '
                F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def outputs( self ):
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs( self ):
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids')
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask')
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs( self ):
        pass
    def get_encoder_config( self , encoder_config ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
    def get_decoder_config( self , encoder_config , decoder_config , feature = "default" ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
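# Composing a config from two sub-model configs (illustrative checkpoint names):
#
#   from transformers import AutoConfig
#   encoder_cfg = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#   decoder_cfg = AutoConfig.from_pretrained("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)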
| 80 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key: str, default: bool = False):
    '''simple docstring'''
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    '''simple docstring'''
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    '''simple docstring'''
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    '''simple docstring'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)
def require_bnb(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"""test requires torch version >= {version}""")(test_case)
def require_tensorboard(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    '''simple docstring'''
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    '''simple docstring'''
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(test_case)
class TempDirTestCase(unittest.TestCase):
    """simple docstring"""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """simple docstring"""
    def add_mocks(self, mocks):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    """simple docstring"""
    def __init__(self, returncode, stdout, stderr):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException(Exception):
    """simple docstring"""
    pass
def run_command(command, return_stdout=False):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 108 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs=None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 154 | 0 |
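# Illustrative usage sketch (added in editing, not from the source): the tokenizer above
# appends SEP and CLS at the *end* of the sequence, XLNet-style. A minimal standalone
# re-creation of that layout, with made-up token ids:
def _build_inputs(token_ids_0, token_ids_1=None, sep_id=4, cls_id=3):
    sep, cls = [sep_id], [cls_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls

assert _build_inputs([7, 8, 9]) == [7, 8, 9, 4, 3]
assert _build_inputs([7], [8]) == [7, 4, 8, 4, 3]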
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subparsers = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subparsers, parents=[parent_parser])
    update_command_parser(subparsers, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main() | 64 | """simple docstring"""
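# Minimal sketch (hypothetical names, not part of the source) of the subparser pattern
# used by get_config_parser()/main() above: a parent parser carries shared flags, each
# subcommand binds a `func` default, and main() dispatches on it.
import argparse

def _run(args):
    print("verbose =", args.verbose)

_parent = argparse.ArgumentParser(add_help=False)
_parent.add_argument("--verbose", action="store_true")
_parser = argparse.ArgumentParser(prog="tool")
_sub = _parser.add_subparsers(title="subcommands", dest="subcommand")
_sub.add_parser("run", parents=[_parent]).set_defaults(func=_run)
_args = _parser.parse_args(["run", "--verbose"])
_args.func(_args)  # prints "verbose = True"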
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class) | 64 | 1 |
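# Hypothetical condensed sketch (names illustrative, not part of the source) of the
# three-step resolution order that from_pretrained implements above: an explicit
# `image_processor_type` in the config wins, then remote code via `auto_map`, then the
# static model-type mapping.
def _resolve(config_dict, model_type, mapping):
    if config_dict.get("image_processor_type"):
        return config_dict["image_processor_type"]
    if "AutoImageProcessor" in config_dict.get("auto_map", {}):
        return config_dict["auto_map"]["AutoImageProcessor"]
    return mapping.get(model_type)

assert _resolve({"image_processor_type": "ViTImageProcessor"}, "vit", {}) == "ViTImageProcessor"
assert _resolve({}, "vit", {"vit": "ViTImageProcessor"}) == "ViTImageProcessor"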
def is_unique(input_str: str) -> bool:
    # Track seen characters as bits of one big integer: bit i is set once the
    # character with code point i has appeared.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point,
        # the character repeats.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
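# Quick check (illustrative; `is_unique` is the name given in the cleanup above) of the
# bitmask trick: each character maps to one bit, so a repeated character hits a set bit.
assert is_unique("abc")
assert not is_unique("abca")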
import os
def A_ ( a = "matrix.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(a ) , a ) ) as in_file:
SCREAMING_SNAKE_CASE_ : Dict = in_file.read()
SCREAMING_SNAKE_CASE_ : Dict = [[int(a ) for cell in row.split(',' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE_ : str = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE_ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE_ : Any = [[0 for i in range(a )] for j in range(a )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = grid[0][0]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Dict = grid[i][0] + dp[i - 1][0]
for i in range(1 , a ):
for j in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
| 253 | 1 |
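# Tiny worked example (made up, not from the source) of the same DP on a 2x2 grid:
# the minimal right/down path in [[1, 3], [1, 5]] is 1 -> 1 -> 5 = 7.
def _min_path_sum(grid):
    n = len(grid)
    dp = [row[:] for row in grid]
    for i in range(n):
        for j in range(n):
            if i == 0 and j == 0:
                continue
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j])
            if j > 0:
                candidates.append(dp[i][j - 1])
            dp[i][j] += min(candidates)
    return dp[-1][-1]

assert _min_path_sum([[1, 3], [1, 5]]) == 7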
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style layer norm: scale only, no shift; variance computed in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 357 |
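# Minimal standalone sketch (illustrative only) of the FiLM conditioning used by
# T5FiLMLayer above: one linear layer predicts a per-channel scale and shift from the
# conditioning embedding.
import torch
from torch import nn

class _FiLM(nn.Module):
    def __init__(self, cond_dim, num_features):
        super().__init__()
        self.scale_bias = nn.Linear(cond_dim, num_features * 2, bias=False)

    def forward(self, x, conditioning):
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift

_out = _FiLM(cond_dim=8, num_features=4)(torch.randn(2, 4), torch.randn(2, 8))
assert _out.shape == (2, 4)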
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids

        self.assertEqual(input_ids_up, input_ids_low)
    def test_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 251 | 0 |
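# Illustrative sketch (not from the source) of the CTC-style decoding behaviour these
# tests exercise: repeated ids collapse unless separated by the pad id, and pad ids drop.
def _ctc_collapse(ids, pad_id=0):
    out, prev = [], None
    for i in ids:
        if i != prev and i != pad_id:
            out.append(i)
        prev = i
    return out

assert _ctc_collapse([11, 5, 5, 5, 15, 15, 0, 15, 8]) == [11, 5, 15, 15, 8]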
def solution(n: int = 1000) -> int:
    """Count expansions of the sqrt(2) continued fraction whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f"{solution() = }")
| 51 |
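# First expansions of the sqrt(2) continued fraction (worked example, not from the
# source): each step maps (n, d) -> (n + 2d, n + d), giving 3/2, 7/5, 17/12, ...
n, d = 1, 1
expansions = []
for _ in range(3):
    n, d = n + 2 * d, n + d
    expansions.append((n, d))
assert expansions == [(3, 2), (7, 5), (17, 12)]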
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147 | 0 |
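# Minimal sketch (illustrative; much simpler than the real _LazyModule) of the
# lazy-import idea above: resolve attributes to submodule imports only on first access.
import importlib
import types

class _LazyStub(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{module_name}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)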
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 184 |
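# Hedged sketch (values invented) of the regression-check pattern above: seed all RNGs,
# run a single forward pass, and compare a small output slice to stored reference values.
import torch

torch.manual_seed(0)
_produced = torch.randn(30)
torch.manual_seed(0)
_expected = torch.randn(30)
assert torch.allclose(_produced, _expected, atol=1e-3)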
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a mod-23 check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 | 1 |
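# Worked example (digits chosen for illustration) of the mod-23 check letter:
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates.
assert 12345678 % 23 == 14
assert "TRWAGMYFPDXBNJZSQVHLCKE"[14] == "Z"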
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 207 |
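# Quick worked example (not from the source): 585 is a double-base palindrome, since
# str(585) and bin(585) == "0b1001001001" both read the same forwards and backwards.
_n = 585
_b = bin(_n).split("b")[1]
assert str(_n) == str(_n)[::-1] and _b == _b[::-1]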
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 283 | 0 |
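# Illustrative sketch (assumption: this mirrors the usual MobileNet "make divisible"
# rule, which is not shown in the config above) of how depth_multiplier,
# depth_divisible_by and min_depth typically interact: channel counts are scaled, then
# rounded to the nearest multiple of the divisor, never below min_depth.
def _scaled_channels(channels, multiplier=1.0, divisor=8, min_depth=8):
    return max(min_depth, int(channels * multiplier + divisor / 2) // divisor * divisor)

assert _scaled_channels(32, multiplier=1.0) == 32
assert _scaled_channels(32, multiplier=0.75) == 24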
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 166 |
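# Usage example (illustrative; `mean` is the name given in the cleanup above): the
# empty-list guard raises instead of dividing by zero.
assert mean([3.0, 5.0, 7.0]) == 5.0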
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def _a (self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DeiTModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a (self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : int = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Tuple = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase__ : List[str] = problem_type["""title"""]
UpperCAmelCase__ : List[Any] = problem_type["""num_labels"""]
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
UpperCAmelCase__ : Optional[int] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
UpperCAmelCase__ : str = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the PyTorch warning "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
UpperCAmelCase__ : Any = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _a (self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def a__ ( ) -> int:
UpperCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a (self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_lowerCamelCase )
# verify the logits
UpperCAmelCase__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCAmelCase__ : Dict = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : str = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
UpperCAmelCase__ : Dict = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase__ : int = model(_lowerCamelCase )
| 166 | 1 |
"""simple docstring"""
from math import factorial, radians
def A_ ( angle_in_degrees, accuracy = 18, rounded_values_count = 10 ):
    '''Approximate sin(angle_in_degrees) with a truncated Maclaurin series.'''
    # Wrap the angle into [0, 360) so the series converges in few terms
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count )
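# Quick sanity check: A_(90) rounds to 1.0 and A_(180) to 0.0, matching sin(90°) and sin(180°).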
if __name__ == "__main__":
__import__("doctest").testmod()
| 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = RoFormerTokenizer
lowerCAmelCase_ = RoFormerTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def __a ( self : List[Any] ):
"""simple docstring"""
super().setUp()
def __a ( self : Optional[Any] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def __a ( self : Any , **_lowercase : Union[str, Any] ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """永和服装饰品有限公司,今天天气非常好"""
SCREAMING_SNAKE_CASE__ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def __a ( self : Dict ):
"""simple docstring"""
pass
def __a ( self : Dict ):
"""simple docstring"""
pass
def __a ( self : List[str] ):
"""simple docstring"""
pass
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Optional[int] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 204 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a_ = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig ):
UpperCamelCase ="ernie_m"
UpperCamelCase ={"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , UpperCamelCase_ = 25_00_02 , UpperCamelCase_ = 7_68 , UpperCamelCase_ = 12 , UpperCamelCase_ = 12 , UpperCamelCase_ = 30_72 , UpperCamelCase_ = "gelu" , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 5_14 , UpperCamelCase_ = 0.0_2 , UpperCamelCase_ = 1 , UpperCamelCase_ = 1E-05 , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=0.0 , **UpperCamelCase_ , ) -> List[str]:
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : List[str] = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : Any = hidden_act
__lowercase : List[str] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = max_position_embeddings
__lowercase : Optional[Any] = initializer_range
__lowercase : int = layer_norm_eps
__lowercase : Optional[int] = classifier_dropout
__lowercase : List[Any] = is_decoder
__lowercase : List[str] = act_dropout
| 249 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
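# A* orders its frontier by f = g (path cost accumulated so far) + heuristic (estimated cost to the goal)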
def search( grid, init, goal, cost, heuristic ):
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 249 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/xglm-564M""": 2_048,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
__lowerCamelCase : int = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : int = ["input_ids", "attention_mask"]
def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs = None, **kwargs, ):
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    # Compatibility with the original tokenizer
    self.num_madeup_words = 7
    madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
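    # fairseq's XGLM checkpoints reserve seven trailing "<madeupword{i}>" placeholders; they are recreated here so the vocab sizes line up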
    kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""", [] )
    kwargs["additional_special_tokens"] += [
        word for word in madeup_words if word not in kwargs["additional_special_tokens"]
    ]
    super().__init__(
        bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(str(vocab_file ) )
    self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
    self.fairseq_offset = 1
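    # Worked example from the table above: "," has spm id 3, so its fairseq id is 3 + fairseq_offset = 4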
# Mimic fairseq token-to-id alignment for the first 4 token
A : int = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
A : Any = len(self.sp_model )
A : Dict = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCamelCase__ )
A : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
    state = self.__dict__.copy()
    state["""sp_model"""] = None
    state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
    return state
def __setstate__( self, d ):
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self, """sp_model_kwargs""" ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b = None ):
    if token_ids_b is None:
        return [self.sep_token_id] + token_ids_a
    sep = [self.sep_token_id]
    return sep + token_ids_a + sep + sep + token_ids_b
def get_special_tokens_mask( self, token_ids_a, token_ids_b = None, already_has_special_tokens = False ):
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=already_has_special_tokens )
    if token_ids_b is None:
        return [1] + ([0] * len(token_ids_a ))
    return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None ):
    sep = [self.sep_token_id]
    if token_ids_b is None:
        return len(sep + token_ids_a ) * [0]
    return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
@property
def vocab_size( self ):
    return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def get_vocab( self ):
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
    vocab.update(self.added_tokens_encoder )
    return vocab
def _tokenize( self, text ):
    return self.sp_model.encode(text, out_type=str )
def _convert_token_to_id( self, token ):
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    spm_id = self.sp_model.PieceToId(token )
    # Need to return unknown token if the SP model returned 0
    return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token( self, index ):
    if index in self.fairseq_ids_to_tokens:
        return self.fairseq_ids_to_tokens[index]
    return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self, tokens ):
    out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE, """ """ ).strip()
    return out_string
def save_vocabulary( self, save_directory, filename_prefix = None ):
    if not os.path.isdir(save_directory ):
        logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file, out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file, """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_:Any = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""MobileNetV2FeatureExtractor"""]
SCREAMING_SNAKE_CASE_:Tuple = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 115 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __lowercase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_a : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_a )
def __lowercase ( self ) -> Optional[int]:
_a : str = self.dummy_uncond_unet
_a : int = DDIMScheduler()
_a : List[str] = self.dummy_vq_model
_a : Union[str, Any] = LDMPipeline(unet=_a , vqvae=_a , scheduler=_a )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : Any = torch.manual_seed(0 )
_a : Tuple = ldm(generator=_a , num_inference_steps=2 , output_type='''numpy''' ).images
_a : Union[str, Any] = torch.manual_seed(0 )
_a : Any = ldm(generator=_a , num_inference_steps=2 , output_type='''numpy''' , return_dict=_a )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a : Optional[int] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_a : Tuple = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Optional[int] = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : Optional[Any] = torch.manual_seed(0 )
_a : Tuple = ldm(generator=_a , num_inference_steps=5 , output_type='''numpy''' ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_a : Tuple = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_a : int = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 235 |
import operator as op
a__ = '''scaler.pt'''
a__ = '''pytorch_model'''
a__ = '''random_states'''
a__ = '''optimizer'''
a__ = '''scheduler'''
a__ = '''pytorch_model.bin'''
a__ = '''pytorch_model.bin.index.json'''
a__ = '''model.safetensors'''
a__ = '''model.safetensors.index.json'''
a__ = '''1.10.2'''
a__ = '''py38'''
a__ = '''4.17.0'''
a__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
a__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
a__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
a__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
a__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
a__ = '''2.0.1'''
a__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
a__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
a__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
a__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
a__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 235 | 1 |
'''simple docstring'''
from __future__ import annotations
__lowercase: str = "#"
class UpperCAmelCase :
def __init__( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = {}
def lowercase_ ( self : Tuple, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in text:
if char not in trie:
UpperCamelCase__ = {}
UpperCamelCase__ = trie[char]
UpperCamelCase__ = True
def lowercase_ ( self : Optional[Any], a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in prefix:
if char in trie:
UpperCamelCase__ = trie[char]
else:
return []
return self._elements(a_ )
def lowercase_ ( self : int, a_ : dict ):
"""simple docstring"""
UpperCamelCase__ = []
for c, v in d.items():
UpperCamelCase__ = [" "] if c == END else [(c + s) for s in self._elements(a_ )]
result.extend(a_ )
return tuple(a_ )
__lowercase: Tuple = Trie()
__lowercase: Any = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> tuple:
'''simple docstring'''
UpperCamelCase__ = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
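# With the demo word list, autocomplete_using_trie("de") yields "depart", "detergent", "deer" and "deal" (each followed by a space, the rendered END marker).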
| 358 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav : np.ndarray , max_length : float , sample_rate : int = 1_60_00 ) -> np.ndarray:
    '''Randomly sample a chunk of `max_length` seconds from the input waveform.'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
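# e.g. a 10 s clip at 16 kHz with max_length=20 is returned whole; a 30 s clip is cropped to a random 20 s window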
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
    main()
| 31 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self ):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# image encoding components
a :Optional[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a :Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a :int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
a :List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
a :Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a :str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
a :int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
a :Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
a :Union[str, Any] = AutoencoderKL()
a :Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ):
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
a :List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
a :Any = input_image * 0.5 + 0.5
a :Optional[int] = input_image.clamp(0 , 1 )
a :List[str] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a :List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a :Any = self.get_dummy_components()
a :Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
a :List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :int = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
a :Dict = sd_pipe(**_lowerCamelCase ).images
a :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a :List[str] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
a :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
a :Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a :Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
a :List[str] = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' )
a :List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
a :Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
a :List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a :Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
        a :Optional[int] = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
a :int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a :int = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16 )
a :Any = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a :str = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
a :List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
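# A small standalone restatement (a sketch under assumed names, not part of the
# test suite) of the seeding pattern used in get_dummy_inputs above: torch.Generator
# objects cannot be created on the "mps" backend in the torch versions these tests
# target, so seeding falls back to the global CPU generator.
import torch
def make_seeded_generator(device, seed=0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)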
| 94 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """trajectory_transformer"""
_UpperCamelCase = ["""past_key_values"""]
_UpperCamelCase = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = vocab_size
__lowerCAmelCase : Tuple = action_weight
__lowerCAmelCase : Tuple = reward_weight
__lowerCAmelCase : Union[str, Any] = value_weight
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : str = block_size
__lowerCAmelCase : Optional[Any] = action_dim
__lowerCAmelCase : Union[str, Any] = observation_dim
__lowerCAmelCase : Union[str, Any] = transition_dim
__lowerCAmelCase : Dict = learning_rate
__lowerCAmelCase : Any = n_layer
__lowerCAmelCase : Any = n_head
__lowerCAmelCase : Optional[int] = n_embd
__lowerCAmelCase : str = embd_pdrop
__lowerCAmelCase : Dict = attn_pdrop
__lowerCAmelCase : Optional[int] = resid_pdrop
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : Optional[int] = layer_norm_eps
__lowerCAmelCase : Any = kaiming_initializer_range
__lowerCAmelCase : List[str] = use_cache
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
| 275 | 0 |
from collections import defaultdict
from math import gcd
def __UpperCamelCase ( UpperCAmelCase = 150_0000 ):
    lowercase__ : defaultdict = defaultdict(int )
lowercase__ : Union[str, Any] = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCAmelCase , 2 ):
if gcd(UpperCAmelCase , UpperCAmelCase ) > 1:
continue
lowercase__ : Optional[Any] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCAmelCase , limit + 1 , UpperCAmelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'{solution() = }')
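# The search above enumerates Euclid's parametrisation of primitive Pythagorean
# triples. A self-contained sanity check of that identity: for coprime m > n of
# opposite parity, (m**2 - n**2, 2*m*n, m**2 + n**2) is a right triangle with
# perimeter 2*m*(m + n).
euclid_demo_m, euclid_demo_n = 2, 1
demo_a = euclid_demo_m**2 - euclid_demo_n**2
demo_b = 2 * euclid_demo_m * euclid_demo_n
demo_c = euclid_demo_m**2 + euclid_demo_n**2
assert (demo_a, demo_b, demo_c) == (3, 4, 5) and demo_a**2 + demo_b**2 == demo_c**2
assert demo_a + demo_b + demo_c == 2 * euclid_demo_m * (euclid_demo_m + euclid_demo_n) == 12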
| 350 | '''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=0.9_9_9 , UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowercase__ : str = []
for i in range(UpperCAmelCase ):
lowercase__ : int = i / num_diffusion_timesteps
lowercase__ : Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ) , UpperCAmelCase ) )
    return torch.tensor(UpperCAmelCase , dtype=torch.float32 )
class UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = "fixed_small_log" , __lowerCAmelCase = True , __lowerCAmelCase = 1.0 , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = "squaredcos_cap_v2" , ) -> Optional[int]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
lowercase__ : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
lowercase__ : List[Any] = 1.0 - self.betas
lowercase__ : int = torch.cumprod(self.alphas , dim=0 )
lowercase__ : str = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowercase__ : Optional[Any] = 1.0
# setable values
lowercase__ : Optional[Any] = None
lowercase__ : List[Any] = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() )
lowercase__ : Tuple = variance_type
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> torch.FloatTensor:
return sample
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Optional[int]:
lowercase__ : List[str] = num_inference_steps
lowercase__ : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        lowercase__ : List[str] = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.int64 )
lowercase__ : str = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Tuple:
if prev_timestep is None:
lowercase__ : Any = t - 1
lowercase__ : Any = self.alphas_cumprod[t]
lowercase__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ : str = 1 - alpha_prod_t
lowercase__ : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ : Tuple = self.betas[t]
else:
lowercase__ : Dict = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : Any = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowercase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowercase__ : int = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) )
lowercase__ : Dict = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowercase__ : Union[str, Any] = variance.log()
lowercase__ : Optional[int] = beta.log()
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
lowercase__ : Tuple = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowercase__ , lowercase__ : str = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
lowercase__ : Dict = None
# 1. compute alphas, betas
if prev_timestep is None:
lowercase__ : int = t - 1
lowercase__ : Optional[int] = self.alphas_cumprod[t]
lowercase__ : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ : Optional[int] = 1 - alpha_prod_t
lowercase__ : List[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ : Optional[int] = self.betas[t]
lowercase__ : Optional[Any] = self.alphas[t]
else:
lowercase__ : Any = 1 - alpha_prod_t / alpha_prod_t_prev
lowercase__ : Optional[int] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Dict = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = torch.clamp(
__lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowercase__ : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ : List[Any] = 0
if t > 0:
lowercase__ : Dict = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device )
lowercase__ : Union[str, Any] = self._get_variance(
__lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
lowercase__ : List[Any] = variance
elif self.variance_type == "learned_range":
lowercase__ : int = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
''' for the UnCLIPScheduler.''' )
lowercase__ : List[str] = variance * variance_noise
lowercase__ : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
lowercase__ : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowercase__ : str = timesteps.to(original_samples.device )
lowercase__ : Union[str, Any] = alphas_cumprod[timesteps] ** 0.5
lowercase__ : List[str] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ : List[str] = sqrt_alpha_prod.unsqueeze(-1 )
lowercase__ : int = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase__ : List[Any] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowercase__ : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
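# A toy numeric check (an illustration, not part of the scheduler) of the q-sample
# identity add_noise implements: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# Inverting it for x_0 given (x_t, eps) is exactly the "epsilon" prediction branch in step().
import torch
demo_alpha_bar = torch.tensor(0.9)
demo_x0, demo_eps = torch.randn(2, 3), torch.randn(2, 3)
demo_xt = demo_alpha_bar**0.5 * demo_x0 + (1 - demo_alpha_bar) ** 0.5 * demo_eps
demo_x0_rec = (demo_xt - (1 - demo_alpha_bar) ** 0.5 * demo_eps) / demo_alpha_bar**0.5
assert torch.allclose(demo_x0_rec, demo_x0, atol=1e-6)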
| 214 | 0 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = OmegaConf.load(__magic_name__ )
lowercase__ = torch.load(__magic_name__ , map_location="""cpu""" )["""model"""]
lowercase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowercase__ = {}
lowercase__ = """first_stage_model."""
for key in keys:
if key.startswith(__magic_name__ ):
lowercase__ = state_dict[key]
# extract state_dict for UNetLDM
lowercase__ = {}
lowercase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(__magic_name__ ):
lowercase__ = state_dict[key]
lowercase__ = config.model.params.first_stage_config.params
lowercase__ = config.model.params.unet_config.params
lowercase__ = VQModel(**__magic_name__ ).eval()
vqvae.load_state_dict(__magic_name__ )
lowercase__ = UNetLDMModel(**__magic_name__ ).eval()
unet.load_state_dict(__magic_name__ )
lowercase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__magic_name__ , )
lowercase__ = LDMPipeline(__magic_name__ , __magic_name__ , __magic_name__ )
pipeline.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
A : Dict = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
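# A readable sketch of the prefix split performed above (names are assumptions from
# the original script, which also strips the prefix while copying each key):
def extract_sub_state_dict(state_dict, prefix):
    # keep only the keys under `prefix`, with the prefix removed
    return {key[len(prefix):]: value for key, value in state_dict.items() if key.startswith(prefix)}
# vqvae_state_dict = extract_sub_state_dict(state_dict, 'first_stage_model.')
# unet_state_dict = extract_sub_state_dict(state_dict, 'model.diffusion_model.')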
| 305 |
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0] * len(__magic_name__ )
lowercase__ = []
lowercase__ = [1] * len(__magic_name__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__magic_name__ ) ):
if indegree[i] == 0:
queue.append(__magic_name__ )
while queue:
lowercase__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__magic_name__ )
print(max(__magic_name__ ) )
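# A de-mangled reconstruction (names assumed from the original source, so the
# module-level call below resolves): Kahn's topological traversal of a DAG while
# tracking the length of the longest path ending at each vertex.
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # each vertex alone is a path of length 1
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))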
# Adjacency list of Graph
A : Union[str, Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 305 | 1 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> list:
# bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('The given input must be non-negative' )
# get the generated string sequence
_a : str = gray_code_sequence_string(lowerCAmelCase_ )
#
# convert them to integers
for i in range(len(lowerCAmelCase_ ) ):
_a : Optional[int] = int(sequence[i] , 2 )
return sequence
def __lowerCamelCase ( lowerCAmelCase_ ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_a : Optional[Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_a : List[str] = gray_code_sequence_string(bit_count - 1 )
_a : str = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_a : int = '0' + smaller_sequence[i]
sequence.append(lowerCAmelCase_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_a : Dict = '1' + smaller_sequence[i]
sequence.append(lowerCAmelCase_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
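# A self-contained cross-check: the reflected construction above should agree with
# the direct bitwise form of the Gray code, g(i) = i ^ (i >> 1).
def gray_code_direct(bit_count):
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
assert gray_code_direct(3) == [0, 1, 3, 2, 6, 7, 5, 4]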
| 107 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __magic_name__ :
def __init__( self : Dict ,_UpperCAmelCase : Any ):
_a : Any = data
_a : Node | None = None
class __magic_name__ :
def __init__( self : Any ):
_a : int = None
_a : Optional[int] = None
def __iter__( self : Optional[int] ):
_a : List[Any] = self.head
while self.head:
yield node.data
_a : str = node.next
if node == self.head:
break
def __len__( self : Any ):
return sum(1 for _ in self )
def __repr__( self : int ):
return "->".join(str(_UpperCAmelCase ) for item in iter(self ) )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_a : List[str] = Node(_UpperCAmelCase )
if self.head is None:
_a : Tuple = new_node # first node points itself
_a : int = new_node
elif index == 0: # insert at head
_a : Any = self.head
_a : Tuple = new_node
else:
_a : Any = self.head
for _ in range(index - 1 ):
_a : int = temp.next
_a : Optional[int] = temp.next
_a : int = new_node
if index == len(self ) - 1: # insert at tail
_a : Optional[int] = new_node
def __lowercase ( self : List[Any] ):
return self.delete_nth(0 )
def __lowercase ( self : Dict ):
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_a : Optional[int] = self.head
if self.head == self.tail: # just one node
_a : Optional[int] = None
elif index == 0: # delete head node
_a : Dict = self.tail.next.next
_a : Dict = self.head.next
else:
_a : List[Any] = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : Optional[int] = temp.next
_a : List[str] = temp.next.next
if index == len(self ) - 1: # delete at tail
_a : int = temp
return delete_node.data
def __lowercase ( self : int ):
return len(self ) == 0
def __lowerCamelCase ( ) -> None:
_a : int = CircularLinkedList()
assert len(lowerCAmelCase_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowerCAmelCase_ ) == i
circular_linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
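# A usage sketch (assuming the original class and method names — CircularLinkedList
# with append/delete_nth/delete_front — which the test helper above also relies on):
# the Josephus problem, a classic circular-list application in which every k-th
# element is removed until one survivor remains.
def josephus(number_of_people, step):
    circle = CircularLinkedList()
    for label in range(1, number_of_people + 1):
        circle.append(label)
    index = 0
    while len(circle) > 1:
        index = (index + step - 1) % len(circle)
        circle.delete_nth(index)
    return circle.delete_front()
# josephus(7, 3) == 4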
| 107 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = 1.5
__lowerCamelCase = int(factor * num_class_images )
__lowerCamelCase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=UpperCamelCase__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__lowerCamelCase = client.query(text=UpperCamelCase__ )
if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__lowerCamelCase = int(factor * num_images )
__lowerCamelCase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , )
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ )
with open(F"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(F"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
F"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
__lowerCamelCase = class_images[count]
count += 1
try:
__lowerCamelCase = requests.get(images['url'] )
if img.status_code == 200:
__lowerCamelCase = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser('' , add_help=UpperCamelCase__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ )
return parser.parse_args()
if __name__ == "__main__":
__A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 90 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
__lowerCamelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , x.transpose() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , np.asarray(transpose(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.reshape(lowerCamelCase__ , (4, 3) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.reshape(lowerCamelCase__ , (12, 5) ) ) )
@require_torch
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.asarray(reshape(lowerCamelCase__ , (4, 3) ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.asarray(reshape(lowerCamelCase__ , (12, 5) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.squeeze(lowerCamelCase__ ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.squeeze(lowerCamelCase__ , axis=2 ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.asarray(squeeze(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.asarray(squeeze(lowerCamelCase__ , axis=2 ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.expand_dims(lowerCamelCase__ , axis=1 ) ) )
@require_torch
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase__ , axis=1 ) ) ) )
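# A minimal sketch (an illustration, not the transformers implementation) of the
# framework-agnostic dispatch these tests exercise: each utility inspects the
# input's type and forwards to the matching backend op.
import numpy as np
def transpose_any(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.permute(*axes) if axes is not None else array.T
    except ImportError:
        pass
    raise TypeError(f'unsupported input type {type(array)} for transpose_any')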
| 90 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : int = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
UpperCAmelCase_ : str = {
            'input_ids': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.int32 ), # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : int = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , _UpperCamelCase )
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 145 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = RemBertConfig.from_json_file(__snake_case )
print('Building PyTorch model from configuration: {}'.format(str(__snake_case ) ) )
UpperCAmelCase_ : Dict = RemBertModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print('Save PyTorch model to {}'.format(__snake_case ) )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 145 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a : Optional[Any] = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
if "." in tensor_name:
snake_case_ = tensor_name.split('''.''' )
for split in splits[:-1]:
snake_case_ = getattr(__UpperCAmelCase, __UpperCAmelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
snake_case_ = new_module
snake_case_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
snake_case_ = tensor_name in module._buffers
snake_case_ = getattr(__UpperCAmelCase, __UpperCAmelCase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
snake_case_ = False
snake_case_ = False
if is_buffer or not is_bitsandbytes_available():
snake_case_ = False
snake_case_ = False
else:
        snake_case_ = hasattr(bnb.nn, '''Params4bit''' ) and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit )
        snake_case_ = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params )
if is_abit or is_abit:
snake_case_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
snake_case_ = old_value.to(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase, torch.Tensor ):
snake_case_ = value.to('''cpu''' )
                if value.dtype == torch.int8:
snake_case_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
snake_case_ = torch.tensor(__UpperCAmelCase, device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D ) and fpaa_statistics is None:
snake_case_ = new_value.T
snake_case_ = old_value.__dict__
if is_abit:
                snake_case_ = bnb.nn.Int8Params(__UpperCAmelCase, requires_grad=__UpperCAmelCase, **__UpperCAmelCase ).to(__UpperCAmelCase )
            elif is_abit:
                snake_case_ = bnb.nn.Params4bit(__UpperCAmelCase, requires_grad=__UpperCAmelCase, **__UpperCAmelCase ).to(__UpperCAmelCase )
snake_case_ = new_value
if fpaa_statistics is not None:
setattr(module.weight, '''SCB''', fpaa_statistics.to(__UpperCAmelCase ) )
else:
if value is None:
snake_case_ = old_value.to(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase, torch.Tensor ):
snake_case_ = value.to(__UpperCAmelCase )
else:
snake_case_ = torch.tensor(__UpperCAmelCase, device=__UpperCAmelCase )
if is_buffer:
snake_case_ = new_value
else:
snake_case_ = nn.Parameter(__UpperCAmelCase, requires_grad=old_value.requires_grad )
snake_case_ = new_value
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(__UpperCAmelCase )
        if (isinstance(__UpperCAmelCase, nn.Linear ) or isinstance(__UpperCAmelCase, Conv1D )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(__UpperCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__UpperCAmelCase, __UpperCAmelCase ):
snake_case_ ,snake_case_ = module.weight.shape
else:
snake_case_ = module.in_features
snake_case_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        snake_case_ = bnb.nn.Linear8bitLt(
                            __UpperCAmelCase, __UpperCAmelCase, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
snake_case_ = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            snake_case_ = bnb.nn.Linear4bit(
                                __UpperCAmelCase, __UpperCAmelCase, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
snake_case_ = True
# Store the module class in case we need to transpose the weight later
snake_case_ = type(__UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__UpperCAmelCase )
if len(list(module.children() ) ) > 0:
snake_case_ ,snake_case_ = _replace_with_bnb_linear(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, has_been_replaced=__UpperCAmelCase, )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=None, __UpperCAmelCase=None, __UpperCAmelCase=None ) -> str:
'''simple docstring'''
snake_case_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
snake_case_ ,snake_case_ = _replace_with_bnb_linear(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''', __UpperCAmelCase, )
return replace_with_bnb_linear(*__UpperCAmelCase, **__UpperCAmelCase )
def __magic_name__ ( *__UpperCAmelCase, **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''', __UpperCAmelCase, )
return set_module_quantized_tensor_to_device(*__UpperCAmelCase, **__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> str:
'''simple docstring'''
snake_case_ = deepcopy(__UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
snake_case_ = find_tied_parameters(__UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCAmelCase, __UpperCAmelCase ):
snake_case_ = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(__UpperCAmelCase, [] )
snake_case_ = len(__UpperCAmelCase ) > 0
# Check if it is a base model
snake_case_ = not hasattr(__UpperCAmelCase, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(__UpperCAmelCase ) - set(__UpperCAmelCase )
snake_case_ = list(set(__UpperCAmelCase ) ) + list(__UpperCAmelCase )
# remove ".weight" from the keys
snake_case_ = ['''.weight''', '''.bias''']
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(__UpperCAmelCase, '''''' )
filtered_module_names.append(__UpperCAmelCase )
return filtered_module_names
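# A usage sketch of what these helpers enable downstream (standard transformers
# API; the checkpoint name is only a placeholder). Requires bitsandbytes,
# accelerate and a CUDA device.
def load_8bit_example():
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    quant_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    # every eligible nn.Linear is replaced by bnb.nn.Linear8bitLt at load time
    return AutoModelForCausalLM.from_pretrained(
        'facebook/opt-350m', quantization_config=quant_config, device_map='auto'
    )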
| 56 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _lowerCAmelCase : List[Any]=10_000 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : Union[str, Any]=2_048 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="relu" , _lowerCAmelCase : Union[str, Any]=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=6_000 , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Optional[Any]=(5, 5) , _lowerCAmelCase : str=1_024 , _lowerCAmelCase : str=80 , _lowerCAmelCase : Tuple=1 , **_lowerCAmelCase : Any , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) | 225 | 0 |
from copy import deepcopy
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : list[int] | None = None , UpperCAmelCase_ : int | None = None ):
if arr is None and size is not None:
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : List[str] = [0] * size
elif arr is not None:
self.init(UpperCAmelCase_ )
else:
raise ValueError("Either arr or size must be specified" )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : list[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = deepcopy(UpperCAmelCase_ )
for i in range(1 , self.size ):
SCREAMING_SNAKE_CASE : List[Any] = self.next_(UpperCAmelCase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = self.next_(UpperCAmelCase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def _A ( UpperCAmelCase_ : int ):
return index + (index & (-index))
@staticmethod
def _A ( UpperCAmelCase_ : int ):
return index - (index & (-index))
def _A ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
SCREAMING_SNAKE_CASE : Any = self.next_(UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) )
def _A ( self : Dict , UpperCAmelCase_ : int ):
if right == 0:
return 0
SCREAMING_SNAKE_CASE : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
SCREAMING_SNAKE_CASE : List[str] = self.prev(UpperCAmelCase_ )
return result
def _A ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : int ):
return self.query(UpperCAmelCase_ , index + 1 )
def _A ( self : List[Any] , UpperCAmelCase_ : int ):
value -= self.tree[0]
if value < 0:
return -1
SCREAMING_SNAKE_CASE : int = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
SCREAMING_SNAKE_CASE : List[str] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
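# A readable restatement of the core update/prefix pair (method names assumed from
# the original source; the mangled class above gives every method the same name).
# The expression index & (-index) isolates the lowest set bit.
class FenwickSketch:
    def __init__(self, size):
        self.size = size
        self.tree = [0] * size
    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index += index & (-index)
    def prefix(self, right):  # sum of the elements in [0, right)
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right -= right & (-right)
        return result
# demo: f = FenwickSketch(8); f.add(1, 5); f.add(3, 2); f.prefix(4) == 7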
| 350 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def lowerCamelCase__ ( lowercase , lowercase = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
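# A back-of-envelope note (not part of the script): inside accelerator.accumulate,
# gradients are synchronized only every gradient_accumulation_steps batches, so the
# effective batch size is per_device_batch_size * gradient_accumulation_steps * num_processes.
demo_batch, demo_accum_steps, demo_processes = 16, 4, 2
assert demo_batch * demo_accum_steps * demo_processes == 128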
| 319 | 0 |
from __future__ import annotations
__snake_case :str = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid , init , goal , cost , heuristic , ):
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we cannot expand any further
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
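# Worked example for the demo below: with goal = [4, 5], the heuristic at the
# start cell [0, 0] is abs(0 - 4) + abs(0 - 5) = 9 (the Manhattan distance),
# and obstacle cells are bumped to 99 so the search steers around them.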
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free paths whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic the fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , __UpperCAmelCase : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : int ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
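# Illustrative sketch (standalone helper, not part of the class above) of the
# SentencePiece-to-fairseq id alignment implemented by the token-to-id method:
# piece ids are shifted up by the fairseq offset of 1, and an SP id of 0
# (unknown piece) falls through to the fairseq <unk> id 3.
def spm_id_to_fairseq_id(spm_id: int, offset: int = 1, unk_id: int = 3) -> int:
    return spm_id + offset if spm_id else unk_id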
| 165 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
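# Usage note (illustrative): with the `_LazyModule` above, importing the package
# itself stays cheap; the torch-backed classes such as `InformerModel` are only
# imported on first attribute access, and are skipped when torch is unavailable.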
| 217 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
HIDDEN_SIZE_MAPPING = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def convert_state_dict( state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , name )
        # ffn -> feed_forward
        name = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
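# Example of the full renaming chain above (illustrative key, not from a real
# checkpoint): "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key".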
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}." )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        '''Cleaning up shards. This may fail with an OOM error; if this is the case don\'t worry, you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 217 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,sep_token="</s>" ,**kwargs ,):
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,sep_token=sep_token ,**kwargs ,)
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 89 |
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 1 |
from __future__ import annotations
from random import choice
def random_pivot(lst ):
    '''simple docstring'''
    return choice(lst )
def kth_number(lst , k ) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
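# Usage sketch (illustrative values): kth_number([3, 1, 4, 5, 2], 3) returns 3,
# the 3rd smallest element. Note the partition drops elements equal to the
# pivot, so the input is assumed to contain distinct values.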
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    '''simple docstring'''
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
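# Worked XOR example for the loop above: keychar = ord("g") = 103 and
# cipherchar = 14 give decodedchar = 103 ^ 14 = 105, i.e. chr(105) = "i".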
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}" )
class a ( _lowerCamelCase ):
snake_case_ = ["pixel_values"]
def __init__( self : Union[str, Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ):
super().__init__(**lowercase_ )
snake_case_ = size if size is not None else {'''shortest_edge''': 224}
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
snake_case_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
snake_case_ = get_size_dict(lowercase_ , param_name='''crop_size''' )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = resample
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" in size:
snake_case_ = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_ )
elif "height" in size and "width" in size:
snake_case_ = (size['''height'''], size['''width'''])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ):
snake_case_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ):
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
snake_case_ = to_numpy_array(lowercase_ )
if do_resize:
snake_case_ = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ )
if do_center_crop:
snake_case_ = self.center_crop(lowercase_ , size=lowercase_ )
if do_rescale:
snake_case_ = self.rescale(image=lowercase_ , scale=lowercase_ )
if do_normalize:
snake_case_ = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ )
snake_case_ = to_channel_dimension_format(lowercase_ , lowercase_ )
return image
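    # Per-frame order in the preprocessing method above: resize -> center_crop ->
    # rescale -> normalize, each step gated by its corresponding `do_*` flag.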
def A_ ( self : Tuple , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Any , ):
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowercase_ , param_name='''crop_size''' )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
snake_case_ = make_batched(lowercase_ )
snake_case_ = [
[
self._preprocess_image(
image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , )
for img in video
]
for video in videos
]
snake_case_ = {'''pixel_values''': videos}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 56 |
"""simple docstring"""
import random
def partition( a , left_index , right_index ):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a , left , right ):
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main():
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
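# Design note: picking the pivot uniformly at random gives quicksort an expected
# O(n log n) running time and avoids the quadratic worst case that a fixed
# first-element pivot hits on already-sorted input.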
if __name__ == "__main__":
main()
| 261 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="""closed""" )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 371 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__( self , initial_capacity: int = 6 ) -> None:
        '''simple docstring'''
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity: int ) -> None:
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ) -> Any | None:
        '''simple docstring'''
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data: Any ) -> None:
        '''simple docstring'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        '''simple docstring'''
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        '''simple docstring'''
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_clipseg"""] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def mark_multiple(*keys: List[str] ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler(type ):
    '''simple docstring'''
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 58 | 1 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 211 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n , prec=1_000 ):
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
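# Worked decomposition example for the step above: n = 13 gives
# n - 1 = 12 = 3 * 2**2, i.e. d = 3 and exp = 2.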
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 211 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        """simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'Accelerated optimizer pickling failed with {e}' )
        AcceleratorState._reset_state()
| 42 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __magic_name__ ( A : Tuple, A : List[Any], A : List[Any], A : Dict ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __magic_name__ ( A : List[Any], A : int, A : Optional[Any], A : Optional[int], A : Any=True ):
'''simple docstring'''
model.train()
a = model(A )
a = F.mse_loss(A, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(A )
def __magic_name__ ( A : Any, A : Any=False ):
'''simple docstring'''
set_seed(42 )
a = RegressionModel()
a = deepcopy(A )
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
model.to(accelerator.device )
if sched:
a = AdamW(params=model.parameters(), lr=1E-3 )
a = AdamW(params=ddp_model.parameters(), lr=1E-3 )
a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 )
a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 )
# Make a copy of `model`
if sched:
a , a , a , a = accelerator.prepare(A, A, A, A )
else:
a , a = accelerator.prepare(A, A )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(A, A, A, A )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : List[Any]=False, A : List[Any]=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a = get_training_setup(A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A, A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(A ):
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
GradientState._reset_state()
def __magic_name__ ( A : List[Any]=False, A : Any=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a , a , a , a , a = get_training_setup(A, A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(A, A, A, A, A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(A ):
step_model(A, A, A, A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A ))
if accelerator.num_processes > 1:
check_model_parameters(A, A, A, A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator()
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
a = RegressionDataset(length=96 )
a = DataLoader(A, batch_size=16 )
a , a = accelerator.prepare(A, A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if iteration < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if batch_num < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 107 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
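# Worked example (illustrative): add_three(1, 3, 1, 3, 1, 3) evaluates 1/3 + 1/3 + 1/3:
# top = 1*3*3 + 1*3*3 + 1*3*3 = 27, bottom = 3*3*3 = 27, gcd(27, 27) = 27,
# so the reduced sum is returned as (1, 1), i.e. the fraction 1/1.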
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 360 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 270 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char "[s]" token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe "#" token id
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece "[SEP]" token id
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
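# Rough usage sketch (assumes network access; the checkpoint name is the one used
# in the MGP-STR docs, and `model` stands for an MgpstrForSceneTextRecognition):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   generated = processor.batch_decode(outputs.logits)["generated_text"]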
| 260 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if `phone` is a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
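# Illustrative checks:
#   indian_phone_validator("+918827897895")  -> True   ("+91" prefix, then ten digits)
#   indian_phone_validator("9876543210")     -> True   (bare ten-digit number)
#   indian_phone_validator("1234567890")     -> False  (mobile numbers start with 7, 8 or 9)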
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 153 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 212 |
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
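# Example invocation (paths are placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch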
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 212 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
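# A toy sketch of how ConstantLengthDataset packs documents (names illustrative):
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   docs = [{"content": "def f():\n    return 1\n"}] * 1000
#   ds = ConstantLengthDataset(tok, docs, seq_length=128)
#   first = next(iter(ds))  # a torch tensor of exactly 128 token ids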
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
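# Note: perplexity is simply exp(mean eval loss), so a mean loss of 2.0 corresponds
# to a perplexity of about e**2 ≈ 7.39.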
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 82 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Coerce a scalar to a 2-tuple; iterables are returned unchanged."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
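# e.g. to_atuple(224) -> (224, 224), while to_atuple((224, 196)) is returned unchanged.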
@require_tf
class TFVisionTextDualEncoderMixin:
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = after_output[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs()
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = after_outputs[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFViTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFRobertaModel(_snake_case , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFCLIPVisionModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_snake_case )
_lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_snake_case , padding=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCAmelCase = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1e-3 ) )
| 82 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
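# For a reproducible shuffle, seed the RNG first (illustrative):
#   random.seed(0)
#   fisher_yates_shuffle([0, 1, 2, 3])  # same permutation on every run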
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 368 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 216 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
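    # e.g. with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this property
    # returns 5 * 2**6 = 320: one encoder frame per 320 input samples (20 ms of
    # audio at a 16 kHz sampling rate).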
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
A__ : List[str] =TypeVar('''T''')
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
    def __repr__(self) -> str:
return pformat(self.adj_list )
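# Small usage sketch (illustrative values):
#
#   g = GraphAdjacencyList[int](directed=False)
#   g.add_edge(0, 1).add_edge(1, 2)
#   print(g)  # {0: [1], 1: [0, 2], 2: [1]}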
| 220 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
lowercase : Any = self.get_tokenizer()
lowercase : Optional[int] = self.get_rust_tokenizer()
lowercase : int = self.get_image_processor()
lowercase : List[Any] = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase )
lowercase : Optional[int] = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase )
    def test_save_load_pretrained_additional_features(self):
lowercase : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
lowercase : List[str] = self.get_image_processor(do_normalize=lowerCAmelCase )
lowercase : Tuple = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCAmelCase )
    def test_image_processor(self):
lowercase : int = self.get_image_processor()
lowercase : Optional[Any] = self.get_tokenizer()
lowercase : Any = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowercase : str = self.prepare_image_inputs()
lowercase : Any = image_processor(lowerCAmelCase, return_tensors='np' )
lowercase : Any = processor(images=lowerCAmelCase, return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
    def test_tokenizer(self):
lowercase : Dict = self.get_image_processor()
lowercase : List[Any] = self.get_tokenizer()
lowercase : int = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowercase : str = 'lower newer'
lowercase : Union[str, Any] = processor(text=lowerCAmelCase, return_tensors='np' )
lowercase : Optional[Any] = tokenizer(lowerCAmelCase, return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
    def test_processor(self):
lowercase : int = self.get_image_processor()
lowercase : Any = self.get_tokenizer()
lowercase : str = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowercase : Dict = 'lower newer'
lowercase : List[str] = self.prepare_image_inputs()
lowercase : int = processor(text=lowerCAmelCase, images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
    def test_processor_with_text_list(self):
lowercase : str = 'google/owlvit-base-patch32'
lowercase : str = OwlViTProcessor.from_pretrained(lowerCAmelCase )
lowercase : Dict = ['cat', 'nasa badge']
lowercase : Any = processor(text=lowerCAmelCase )
lowercase : Any = 16
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
    def test_processor_with_nested_text_list(self):
lowercase : Dict = 'google/owlvit-base-patch32'
lowercase : List[str] = OwlViTProcessor.from_pretrained(lowerCAmelCase )
lowercase : Optional[Any] = [['cat', 'nasa badge'], ['person']]
lowercase : List[Any] = processor(text=lowerCAmelCase )
lowercase : List[str] = 16
lowercase : Union[str, Any] = len(lowerCAmelCase )
lowercase : List[Any] = max([len(lowerCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
    def test_processor_case(self):
lowercase : str = 'google/owlvit-base-patch32'
lowercase : Tuple = OwlViTProcessor.from_pretrained(lowerCAmelCase )
lowercase : Optional[int] = ['cat', 'nasa badge']
lowercase : Tuple = processor(text=lowerCAmelCase )
lowercase : Optional[int] = 16
lowercase : Optional[int] = inputs['input_ids']
lowercase : int = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
    def test_processor_case2(self):
lowercase : List[Any] = self.get_image_processor()
lowercase : str = self.get_tokenizer()
lowercase : Optional[int] = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowercase : Optional[Any] = self.prepare_image_inputs()
lowercase : List[str] = self.prepare_image_inputs()
lowercase : Dict = processor(images=lowerCAmelCase, query_images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
    def test_tokenizer_decode(self):
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : List[str] = self.get_tokenizer()
lowercase : Union[str, Any] = OwlViTProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowercase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : List[Any] = processor.batch_decode(lowerCAmelCase )
lowercase : Optional[int] = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
| 255 |
"""simple docstring"""
TIMESTEP_SCHEDULE_1 = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
TIMESTEP_SCHEDULE_2 = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
TIMESTEP_SCHEDULE_3 = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
TIMESTEP_SCHEDULE_4 = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
TIMESTEP_SCHEDULE_5 = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
_UpperCamelCase: str = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
_UpperCamelCase: Optional[Any] = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
_UpperCamelCase: Optional[int] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
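
# A minimal consistency check for the schedules above (`_check_schedule` is an
# illustrative helper, not part of the pipeline code): every schedule starts at
# the last training step (999 of 1000), is strictly decreasing, and ends at 0.
def _check_schedule(timesteps, num_train_timesteps=1000):
    assert timesteps[0] == num_train_timesteps - 1
    assert timesteps[-1] == 0
    assert all(a > b for a, b in zip(timesteps, timesteps[1:]))


if __name__ == "__main__":
    for schedule in (fast27_timesteps, smart185_timesteps, super100_timesteps):
        _check_schedule(schedule)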
| 255 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
assert result == expected
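

# Illustrative companion to the parametrized test above: `is_small_dataset`
# simply compares a byte size against `datasets.config.IN_MEMORY_MAX_SIZE`
# (0 disables in-memory loading). The sizes below are made-up demo values.
def test_in_memory_max_size_demo(monkeypatch):
    monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", 500 * 2**20)  # 500 MiB ceiling
    assert is_small_dataset(250 * 2**20)  # 250 MiB fits under the ceiling
    assert not is_small_dataset(900 * 2**20)  # 900 MiB does not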
| 363 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start: int, end: int, val, left=None, right=None) -> None:
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self) -> str:
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function) -> None:
        self.collection = collection
        self.fn = function
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i: int, val) -> None:
        """Update the element at index i to val in O(log n) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i: int, j: int):
        """Fold `function` over the inclusive index range [i, j] in O(log n) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start: int, end: int) -> SegmentTreeNode:
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node: SegmentTreeNode, i: int, val) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node: SegmentTreeNode, i: int, j: int):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child tree
                return self._query_range(node.left, i, j)
            else:
                # range spans the left and right child trees
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range entirely in the right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield every node in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7 with operator.add
        print(arr.query_range(2, 2))  # 5 with operator.add
        print(arr.query_range(1, 3))  # 13 with operator.add
        print()
| 93 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
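

# Illustrative use of `attribute_map` above, shown as a doctest-style comment
# since this module is normally imported from inside transformers (the sizes
# here are arbitrary, not SantaCoder's real configuration):
#
# >>> config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8)
# >>> config.hidden_size  # resolved to n_embd via attribute_map
# 256
# >>> config.num_hidden_layers  # resolved to n_layer
# 4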
| 201 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #              ^ unk: 2 + 1 = 3                              unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )
        # fmt: on

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # fmt: on
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase_,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
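
# A quick illustration of the round trip the slow tests above assert, as a
# doctest-style comment (needs the real "xlm-roberta-base" files downloaded):
#
# >>> tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# >>> tok.encode("Hello World!")
# [0, 35378, 6661, 38, 2]
# >>> tok.decode([0, 35378, 6661, 38, 2], skip_special_tokens=True)
# 'Hello World!'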
| 224 | 0 |
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu")
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32)
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
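
# A sketch of how a concrete test case would consume the mixin above; the
# `IFPipeline` import and the unittest wiring are illustrative, not part of
# this file:
#
# class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#     pipeline_class = IFPipeline  # hypothetical concrete pipeline under test
#
#     def test_save_load_local(self):
#         self._test_save_load_local()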
| 365 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
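
# Hedged usage sketch, as a doctest-style comment (this module uses relative
# imports, and the real google/pegasus-xsum files must be downloaded):
#
# >>> tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# >>> ids = tok("Sample input for summarization.")["input_ids"]
# >>> ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends EOS
# True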
| 46 | 0 |
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
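

# A small self-contained check of `largest_product` on a made-up 4x4 grid;
# the winning run of four here is the second row (5 * 6 * 7 * 8 = 1680).
if __name__ == "__main__":
    demo_grid = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 1, 2, 3],
        [4, 5, 6, 7],
    ]
    assert largest_product(demo_grid) == 1680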
if __name__ == "__main__":
print(solution())
| 100 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, "torch.Tensor"]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, "torch.Tensor"]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
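

# Quick sanity checks for the string metrics above (values computed by hand;
# this demo block is illustrative and not part of the original module).
if __name__ == "__main__":
    assert exact_match_score("The Answer!", "answer")  # case/punctuation/articles normalized away
    assert abs(f1_score("new york city", "york city") - 0.8) < 1e-9  # p=2/3, r=1 -> F1=0.8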
| 133 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
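

# Standalone illustration of the default alignment rule exercised above: with
# neither out_features nor out_indices given, the helper picks the last stage.
if __name__ == "__main__":
    feats, idxs = get_aligned_output_features_output_indices(None, None, ["s1", "s2", "s3"])
    print(feats, idxs)  # ['s3'] [2]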
| 353 |
"""simple docstring"""
import argparse
import os
import re
__A : Dict = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
__A : Union[str, Any] = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Dict = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : List[str] = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__A : Tuple = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : Tuple = re.compile(R'''\[([^\]]+)\]''')
def A_ ( snake_case_ : Dict ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = _re_indent.search(snake_case_ )
return "" if search is None else search.groups()[0]
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : Dict="" ,snake_case_ : Dict=None ,snake_case_ : Any=None ):
'''simple docstring'''
UpperCamelCase : Optional[int] = 0
UpperCamelCase : List[Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(snake_case_ ):
index += 1
UpperCamelCase : Optional[Any] = ["""\n""".join(lines[:index] )]
else:
UpperCamelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase : Any = [lines[index]]
index += 1
while index < len(snake_case_ ) and (end_prompt is None or not lines[index].startswith(snake_case_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(snake_case_ ) )
if index < len(snake_case_ ) - 1:
UpperCamelCase : Any = [lines[index + 1]]
index += 1
else:
UpperCamelCase : List[str] = []
else:
blocks.append("""\n""".join(snake_case_ ) )
UpperCamelCase : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case_ ) > 0:
blocks.append("""\n""".join(snake_case_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def A_ ( snake_case_ : Optional[Any] ):
'''simple docstring'''
def _inner(snake_case_ : Tuple ):
return key(snake_case_ ).lower().replace("""_""" ,"""""" )
return _inner
def A_ ( snake_case_ : List[Any] ,snake_case_ : Optional[int]=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(snake_case_ : Dict ):
return x
if key is None:
UpperCamelCase : int = noop
# Constants are all uppercase, they go first.
UpperCamelCase : List[Any] = [obj for obj in objects if key(snake_case_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCamelCase : str = [obj for obj in objects if key(snake_case_ )[0].isupper() and not key(snake_case_ ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCamelCase : List[str] = [obj for obj in objects if not key(snake_case_ )[0].isupper()]
UpperCamelCase : Tuple = ignore_underscore(snake_case_ )
return sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ )
def A_ ( snake_case_ : int ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(snake_case_ : List[Any] ):
UpperCamelCase : Any = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
UpperCamelCase : Union[str, Any] = [part.strip().replace("""\"""" ,"""""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[str] = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(snake_case_ )] ) + "]"
UpperCamelCase : str = import_statement.split("""\n""" )
if len(snake_case_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase : str = 2 if lines[1].strip() == """[""" else 1
UpperCamelCase : Dict = [(i, _re_strip_line.search(snake_case_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase : int = sort_objects(snake_case_ ,key=lambda snake_case_ : x[1] )
UpperCamelCase : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(snake_case_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase : List[Any] = _re_bracket_content.sub(_replace ,lines[1] )
else:
UpperCamelCase : Optional[Any] = [part.strip().replace("""\"""" ,"""""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[Any] = keys[:-1]
UpperCamelCase : int = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(snake_case_ )] )
return "\n".join(snake_case_ )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase : List[str] = _re_bracket_content.sub(_replace ,snake_case_ )
return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure` entries to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(keys)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    """Sort the imports of every init found under PATH_TO_TRANSFORMERS (defined earlier in this script)."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 27 | 0 |
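A rough standalone sketch of the ordering rule scripts like the one above apply inside each import block, assuming the usual convention of UPPERCASE constants first, then CamelCase classes, then everything else, each bucket alphabetized case-insensitively. The helper name and sample input below are illustrative, not the script's own definitions.

def sort_objects_sketch(objects):
    # Constants are fully uppercase, classes start with a capital letter,
    # the rest are treated as functions; each group sorts case-insensitively.
    constants = sorted([o for o in objects if o.isupper()], key=str.lower)
    classes = sorted([o for o in objects if o[:1].isupper() and not o.isupper()], key=str.lower)
    functions = sorted([o for o in objects if not o[:1].isupper()], key=str.lower)
    return constants + classes + functions

print(sort_objects_sketch(["load_model", "BertModel", "BERT_MAP", "AutoConfig"]))
# ['BERT_MAP', 'AutoConfig', 'BertModel', 'load_model']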
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class _UpperCAmelCase ( _A , _A ):
SCREAMING_SNAKE_CASE_ : List[Any] = "resnet"
SCREAMING_SNAKE_CASE_ : Tuple = ["basic", "bottleneck"]
def __init__( self : Any , A : Tuple=3 , A : str=64 , A : Tuple=[2_56, 5_12, 10_24, 20_48] , A : List[Any]=[3, 4, 6, 3] , A : Union[str, Any]="bottleneck" , A : int="relu" , A : List[Any]=False , A : Tuple=None , A : int=None , **A : List[str] , ) -> List[Any]:
super().__init__(**A )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
lowercase_ : List[Any] = num_channels
lowercase_ : Tuple = embedding_size
lowercase_ : Dict = hidden_sizes
lowercase_ : Tuple = depths
lowercase_ : Optional[int] = layer_type
lowercase_ : str = hidden_act
lowercase_ : Dict = downsample_in_first_stage
lowercase_ : str = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(A ) + 1 )]
lowercase_ , lowercase_ : List[str] = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : str = version.parse("1.11" )
@property
def A ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A ( self : Union[str, Any] ) -> float:
return 1e-3
| 33 |
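The stage bookkeeping in the ResNet config above falls straight out of the depths list; a quick illustration with the common ResNet-50-style defaults (values used only as an example):

depths = [3, 4, 6, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']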
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 0 |
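The _LazyModule wiring above defers heavy imports until an attribute is first touched. A minimal sketch of the same idea using PEP 562's module-level __getattr__ instead of a helper class (the module and symbol names below are placeholders):

import importlib

_import_structure = {"tokenization_foo": ["FooTokenizer"]}
_attr_to_module = {attr: module for module, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Only runs when normal module lookup fails, i.e. on first access.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

This only behaves lazily when placed in a package's __init__.py; the real _LazyModule additionally caches submodules and surfaces import errors for missing optional backends.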
"""simple docstring"""
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]


def create_vector(end_point_a: Pointad, end_point_b: Pointad) -> Vectorad:
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(point_a: Pointad, point_b: Pointad, point_c: Pointad, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product of AB and AC is the zero vector.
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
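A quick hand-checked use of the helpers above: three points on the x = y = z line are collinear, and nudging one coordinate breaks it.

assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))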
| 354 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """How much verification to run on downloaded files and generated splits."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected one."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The recorded splits contain splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True):
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
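Hypothetical usage of verify_checksums above; the two dicts mimic what a download manager would expect and record, with made-up values:

expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
verify_checksums(expected, recorded)  # silent on success, raises NonMatchingChecksumError on a mismatch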
| 68 | 0 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
UpperCAmelCase : Union[str, Any] = """config.json"""
UpperCAmelCase : Union[str, Any] = """diffusion_pytorch_model.bin"""
UpperCAmelCase : Optional[Any] = """diffusion_flax_model.msgpack"""
UpperCAmelCase : Optional[int] = """model.onnx"""
UpperCAmelCase : int = """diffusion_pytorch_model.safetensors"""
UpperCAmelCase : List[Any] = """weights.pb"""
UpperCAmelCase : Optional[int] = """https://huggingface.co"""
UpperCAmelCase : str = default_cache_path
UpperCAmelCase : str = """diffusers_modules"""
UpperCAmelCase : int = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
UpperCAmelCase : Dict = ["""fp16""", """non-ema"""]
UpperCAmelCase : List[str] = """.self_attn"""
| 95 |
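The HF_MODULES_CACHE line above follows the usual pattern of an environment variable with a computed default; in miniature (the paths are illustrative, not the library's exact defaults):

import os

hf_cache_home = os.path.expanduser(os.getenv("HF_HOME", "~/.cache/huggingface"))
modules_cache = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))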
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]="replace" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : List[str]="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Union[str, Any]="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : int="<mask>" , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : str = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : int = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : int = {}
__SCREAMING_SNAKE_CASE : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = bigram
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Dict = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Dict = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Tuple = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Tuple = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 112 | 0 |
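A toy run of the BPE merge loop the tokenizer above implements, with a made-up two-entry merge table; real tokenizers load thousands of ranked merges from merges.txt, but the mechanics are the same.

def get_pairs_demo(word):
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # invented ranks for the demo
word = ("l", "o", "w", "e", "r")
while True:
    candidates = [p for p in get_pairs_demo(word) if p in bpe_ranks]
    if not candidates:
        break
    first, second = min(candidates, key=lambda pair: bpe_ranks[pair])
    new_word, i = [], 0
    while i < len(word):
        # Merge every adjacent (first, second) occurrence into one symbol.
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    word = tuple(new_word)
print(word)  # ('low', 'e', 'r')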
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase__ ( A_ ):
@require_torch
def _a ( self ) -> str:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__UpperCamelCase ='\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
__UpperCamelCase ='\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
__UpperCamelCase ='\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
__UpperCamelCase ='hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(A_ )
BertModel.from_pretrained(A_ )
BertTokenizer.from_pretrained(A_ )
pipeline(task='fill-mask' , model=A_ )
# baseline - just load from_pretrained with normal network
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
__UpperCamelCase =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__UpperCamelCase ='1'
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _a ( self ) -> List[str]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__UpperCamelCase ='\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
__UpperCamelCase ='\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
__UpperCamelCase ='\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
__UpperCamelCase ='hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(A_ )
BertModel.from_pretrained(A_ )
BertTokenizer.from_pretrained(A_ )
pipeline(task='fill-mask' , model=A_ )
# baseline - just load from_pretrained with normal network
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
__UpperCamelCase =self.get_env()
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _a ( self ) -> int:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__UpperCamelCase ='\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
__UpperCamelCase ='\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
__UpperCamelCase ='\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
__UpperCamelCase =self.get_env()
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__UpperCamelCase ='1'
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='\nfrom transformers import pipeline\n '
__UpperCamelCase ='\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
__UpperCamelCase ='\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
__UpperCamelCase =self.get_env()
__UpperCamelCase ='1'
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, mock, run] )]
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def _a ( self ) -> Any:
__UpperCamelCase ='\nfrom transformers import AutoModel\n '
__UpperCamelCase ='\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
__UpperCamelCase =[sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
__UpperCamelCase =self.get_env()
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__UpperCamelCase ='1'
__UpperCamelCase =subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 350 |
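The offline simulation in the tests above boils down to replacing socket.socket before anything dials out; a minimal standalone version:

import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access the internet")

socket.socket = offline_socket  # any later attempt to open a connection now raises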
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
_import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 117 | 0 |
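The TYPE_CHECKING branch in inits like the one above never executes at runtime; those imports exist purely so static type checkers and IDEs can resolve the lazily exposed names. The same trick in miniature (placeholder names):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # resolved by mypy/pyright, skipped at runtime

def double(x: "Decimal") -> "Decimal":
    return x * 2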
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = 32
_UpperCAmelCase = 2
_UpperCAmelCase = 4
_UpperCAmelCase = 37
_UpperCAmelCase = 'gelu'
_UpperCAmelCase = 0.1
_UpperCAmelCase = 0.1
_UpperCAmelCase = 512
_UpperCAmelCase = 16
_UpperCAmelCase = 2
_UpperCAmelCase = 0.02
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = None
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_UpperCAmelCase = TFDistilBertModel(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = TFDistilBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFDistilBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDistilBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFDistilBertForMultipleChoice(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDistilBertForTokenClassification(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : List[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_a : int = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Dict = False
_a : List[Any] = False
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFDistilBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37 )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_UpperCAmelCase = TFDistilBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
class __a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
| 329 |
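The tester above builds its random inputs through helpers such as ids_tensor and random_attention_mask; a NumPy approximation of their shapes and value ranges (a sketch, not the transformers implementations):

import numpy as np

def ids_tensor(shape, vocab_size, seed=0):
    return np.random.default_rng(seed).integers(0, vocab_size, size=shape)

def random_attention_mask(shape, seed=0):
    mask = np.random.default_rng(seed).integers(0, 2, size=shape)
    mask[:, -1] = 1  # make sure at least one position is attended in each row
    return mask

print(ids_tensor((13, 7), 99).shape)         # (13, 7)
print(random_attention_mask((13, 7)).shape)  # (13, 7)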
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : str = 'ctrl'
_a : Tuple = ['past_key_values']
_a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=246534 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=48 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = dff
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329 | 1 |
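The attribute_map above lets canonical names such as hidden_size resolve to CTRL's native n_embd. A simplified sketch of that aliasing (the real config base class implements it more generally):

class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

print(AliasedConfig(n_embd=1280).hidden_size)  # 1280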
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
_a : int
_a : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape
_UpperCAmelCase = jax.image.resize(
_SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
_UpperCAmelCase = self.conv(_SCREAMING_SNAKE_CASE )
return hidden_states
class __a ( nn.Module ):
_a : int
_a : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.conv(_SCREAMING_SNAKE_CASE )
return hidden_states
class __a ( nn.Module ):
_a : int
_a : int = None
_a : float = 0.0
_a : bool = None
_a : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels
_UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCAmelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase = nn.Dense(_SCREAMING_SNAKE_CASE , dtype=self.dtype )
_UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCAmelCase = nn.Dropout(self.dropout_prob )
_UpperCAmelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_UpperCAmelCase = None
if use_nin_shortcut:
_UpperCAmelCase = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = hidden_states
_UpperCAmelCase = self.norma(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.swish(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conva(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.time_emb_proj(nn.swish(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , 1 )
_UpperCAmelCase = hidden_states + temb
_UpperCAmelCase = self.norma(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.swish(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.dropout(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conva(_SCREAMING_SNAKE_CASE )
if self.conv_shortcut is not None:
_UpperCAmelCase = self.conv_shortcut(_SCREAMING_SNAKE_CASE )
return hidden_states + residual
| 369 |
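The upsampling block above doubles spatial size with nearest-neighbour resizing before its convolution. The resize on its own, shown with plain NumPy repeats:

import numpy as np

x = np.arange(4).reshape(2, 2)  # a tiny 2x2 "image"
up = np.repeat(np.repeat(x, 2, axis=0), 2, axis=1)  # nearest-neighbour 4x4
print(up)
# [[0 0 1 1]
#  [0 0 1 1]
#  [2 2 3 3]
#  [2 2 3 3]]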
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a ( UpperCAmelCase ):
_a : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 185 | 0 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    fib_a, fib_b = 1, 1
    index = 2
    while True:
        i = 0
        fib = fib_a + fib_b
        fib_a, fib_b = fib_b, fib
        index += 1
        for _ in str(fib):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
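A cheap sanity check for solution above: the first Fibonacci term with three digits is 144, the 12th term, so solution(3) should return 12.

assert solution(3) == 12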
| 298 |
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
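One hand-checked step of the blinker defined above: the vertical bar of three live cells flips to a horizontal bar.

assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]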
| 274 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Optional[Any] = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] ='''sew-d'''
def __init__( self , _lowerCamelCase=3_2 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase=2 , _lowerCamelCase=5_1_2 , _lowerCamelCase=2_5_6 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=("p2c", "c2p") , _lowerCamelCase="layer_norm" , _lowerCamelCase="gelu_python" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-7 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowerCamelCase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowerCamelCase=False , _lowerCamelCase=1_2_8 , _lowerCamelCase=1_6 , _lowerCamelCase=True , _lowerCamelCase=0.0_5 , _lowerCamelCase=1_0 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=1_0 , _lowerCamelCase=0 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=2_5_6 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: List[str] = feat_extract_norm
UpperCamelCase_: int = feat_extract_activation
UpperCamelCase_: Optional[int] = list(_lowerCamelCase )
UpperCamelCase_: str = list(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = list(_lowerCamelCase )
UpperCamelCase_: Optional[int] = conv_bias
UpperCamelCase_: Dict = num_conv_pos_embeddings
UpperCamelCase_: List[Any] = num_conv_pos_embedding_groups
UpperCamelCase_: str = len(self.conv_dim )
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: List[str] = intermediate_size
UpperCamelCase_: Any = squeeze_factor
UpperCamelCase_: List[Any] = max_position_embeddings
UpperCamelCase_: Any = position_buckets
UpperCamelCase_: Union[str, Any] = share_att_key
UpperCamelCase_: Optional[int] = relative_attention
UpperCamelCase_: List[Any] = norm_rel_ebd
UpperCamelCase_: Tuple = list(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = hidden_act
UpperCamelCase_: Dict = num_attention_heads
UpperCamelCase_: Optional[Any] = hidden_dropout
UpperCamelCase_: int = attention_dropout
UpperCamelCase_: Tuple = activation_dropout
UpperCamelCase_: List[Any] = feat_proj_dropout
UpperCamelCase_: Optional[int] = final_dropout
UpperCamelCase_: str = layer_norm_eps
UpperCamelCase_: Dict = feature_layer_norm_eps
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase_: List[Any] = apply_spec_augment
UpperCamelCase_: str = mask_time_prob
UpperCamelCase_: str = mask_time_length
UpperCamelCase_: Tuple = mask_time_min_masks
UpperCamelCase_: str = mask_feature_prob
UpperCamelCase_: List[Any] = mask_feature_length
UpperCamelCase_: List[Any] = mask_feature_min_masks
# ctc loss
UpperCamelCase_: List[Any] = ctc_loss_reduction
UpperCamelCase_: Dict = ctc_zero_infinity
# sequence classification
UpperCamelCase_: Dict = use_weighted_layer_sum
UpperCamelCase_: str = classifier_proj_size
@property
def _a ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 292 |
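The property above multiplies the convolutional strides together; with the default strides that product is the model's overall audio downsampling factor:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # the defaults from __init__ above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320, i.e. one output frame per 320 input samples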
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main() | 292 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available ( ) -> bool:
'''simple docstring'''
smp_options = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
smp_options = json.loads(smp_options )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
mpi_options = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
mpi_options = json.loads(mpi_options )
if not mpi_options.get('''sagemaker_mpi_enabled''', False ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('''smdistributed''' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments ( TrainingArguments ):
mp_parameters: str = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def __post_init__( self ):
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , FutureWarning , )
@cached_property
def _setup_devices( self ) -> "torch.device":
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`''' )
if self.no_cuda:
device = torch.device('''cpu''' )
self._n_gpu = 0
elif is_sagemaker_model_parallel_available():
local_rank = smp.local_rank()
device = torch.device('''cuda''' , local_rank )
self._n_gpu = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
self.local_rank = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
device = torch.device('''cuda''' , self.local_rank )
self._n_gpu = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
self._n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
device = torch.device('''cuda''' , self.local_rank )
self._n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device )
return device
@property
def world_size( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def place_model_on_device( self ):
return not is_sagemaker_model_parallel_available()
@property
def _no_sync_in_gradient_accumulation( self ):
return False
| 56 |
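For context, `is_sagemaker_model_parallel_available` keys off two JSON-encoded environment variables that the SageMaker launcher sets. A hedged sketch of what those variables might look like; the values are illustrative assumptions, not real launcher output:

import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})  # hypothetical
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})  # hypothetical

# With both variables shaped like this (and the `smdistributed` package importable),
# is_sagemaker_model_parallel_available() returns True.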
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
self.__claim_vector = claim_vector
self.__allocated_resources_table = allocated_resources_table
self.__maximum_claim_table = maximum_claim_table
def __processes_resource_summation( self ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __available_resources( self ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __need( self ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __need_index_manager( self ) -> dict[int, list[int]]:
return {self.__need().index(i ): i for i in self.__need()}
def main( self , **kwargs ) -> None:
need_list = self.__need()
alloc_resources_table = self.__allocated_resources_table
available_resources = self.__available_resources()
need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
safe = False
for each_need in need_list:
execution = True
for index, need in enumerate(each_need ):
if need > available_resources[index]:
execution = False
break
if execution:
safe = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
process_number = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(each_need )
# update available/freed resources stack
available_resources = np.array(available_resources ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(x ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def __pretty_data( self ) -> None:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(item ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(item ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(x ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
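A minimal driver for the class above, reusing the module-level test tables. The keyword name `describe` is an arbitrary illustration: `main` treats any truthy keyword argument as a request to pretty-print the tables first.

BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)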
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest ( SchedulerCommonTest ):
'''simple docstring'''
scheduler_classes = (IPNDMScheduler,)
forward_default_kwargs = (('''num_inference_steps''', 50),)
def get_scheduler_config( self ,**kwargs ):
config = {"""num_train_timesteps""": 1_0_0_0}
config.update(**kwargs )
return config
def check_over_configs( self ,time_step=0 ,**config ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def test_from_save_pretrained( self ):
pass
def check_over_forward( self ,time_step=0 ,**forward_kwargs ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def full_loop( self ,**config ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1_0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
return sample
def test_step_shape( self ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler ,"""set_timesteps""" ):
scheduler.set_timesteps(num_inference_steps )
elif num_inference_steps is not None and not hasattr(scheduler ,"""set_timesteps""" ):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
scheduler.ets = dummy_past_residuals[:]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
output_1 = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
self.assertEqual(output_0.shape ,sample.shape )
self.assertEqual(output_0.shape ,output_1.shape )
output_0 = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
output_1 = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
self.assertEqual(output_0.shape ,sample.shape )
self.assertEqual(output_0.shape ,output_1.shape )
def test_timesteps( self ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps ,time_step=None )
def test_inference_steps( self ):
for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=None )
def test_full_loop_no_noise( self ):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 77 |
"""simple docstring"""
from math import factorial
def solution ( num : int = 1_0_0 ):
"""Return the sum of the digits of num!."""
return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 77 | 1 |
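Two hand-computed checks for the routine above (the 648 value is the published answer to Project Euler problem 20, which this snippet solves):

# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27
assert solution(10) == 27
# 100! has digit sum 648
assert solution(100) == 648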
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ):
def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
x = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
x = math.floor(val / multiple ) * multiple
if x < min_val:
x = math.ceil(val / multiple ) * multiple
return x
output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
input_height , input_width = get_image_size(input_image )
output_height , output_width = output_size
# determine new height and width
scale_height = output_height / input_height
scale_width = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
return (new_height, new_width)
class DPTImageProcessor ( BaseImageProcessor ):
model_input_names = ['pixel_values']
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
'''simple docstring'''
super().__init__(**kwargs )
size = size if size is not None else {"""height""": 384, """width""": 384}
size = get_size_dict(size )
self.do_resize = do_resize
self.size = size
self.keep_aspect_ratio = keep_aspect_ratio
self.ensure_multiple_of = ensure_multiple_of
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image : np.ndarray , size : Dict[str, int] , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
'''simple docstring'''
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}''' )
output_size = get_resize_output_image_size(
image , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
'''simple docstring'''
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
'''simple docstring'''
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , keep_aspect_ratio : bool = None , ensure_multiple_of : int = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
'''simple docstring'''
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size )
keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data , tensor_type=return_tensors )
def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
'''simple docstring'''
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits ) != len(target_sizes ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(target_sizes ):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits ) ):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
semantic_map = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(semantic_map )
else:
semantic_segmentation = logits.argmax(dim=1 )
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 173 |
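A minimal usage sketch for the processor above. The dummy input image is an illustrative assumption; the 384x384 target comes from the `__init__` defaults.

import numpy as np
from PIL import Image

processor = DPTImageProcessor()
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # dummy RGB image
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)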
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType ( ExplicitEnum ):
CHARACTER = """char"""
BPE = """bpe"""
WORDPIECE = """wp"""
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor ( ProcessorMixin ):
attributes = ["""image_processor""", """char_tokenizer"""]
image_processor_class = """ViTImageProcessor"""
char_tokenizer_class = """MgpstrTokenizer"""
def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> None:
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , FutureWarning , )
feature_extractor = kwargs.pop("feature_extractor" )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
self.char_tokenizer = tokenizer
self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(image_processor , tokenizer )
def __call__( self , text=None , images=None , return_tensors=None , **kwargs )-> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if text is not None:
encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def batch_decode( self , sequences )-> dict:
char_preds , bpe_preds , wp_preds = sequences
batch_size = char_preds.size(0 )
char_strs , char_scores = self._decode_helper(char_preds , "char" )
bpe_strs , bpe_scores = self._decode_helper(bpe_preds , "bpe" )
wp_strs , wp_scores = self._decode_helper(wp_preds , "wp" )
final_strs = []
final_scores = []
for i in range(batch_size ):
scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
max_score_index = scores.index(max(scores ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
out = {}
out["generated_text"] = final_strs
out["scores"] = final_scores
out["char_preds"] = char_strs
out["bpe_preds"] = bpe_strs
out["wp_preds"] = wp_strs
return out
def _decode_helper( self , pred_logits , format )-> Optional[Any]:
if format == DecodeType.CHARACTER:
decoder = self.char_decode
eos_token = 1
eos_str = "[s]"
elif format == DecodeType.BPE:
decoder = self.bpe_decode
eos_token = 2
eos_str = "#"
elif format == DecodeType.WORDPIECE:
decoder = self.wp_decode
eos_token = 1_02
eos_str = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
dec_strs , conf_scores = [], []
batch_size = pred_logits.size(0 )
batch_max_length = pred_logits.size(1 )
_ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
preds_str = decoder(preds_index )
preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
preds_max_prob = preds_max_prob[:, 1:]
for index in range(batch_size ):
pred_eos = preds_str[index].find(eos_str )
pred = preds_str[index][:pred_eos]
pred_index = preds_index[index].cpu().tolist()
pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(pred )
conf_scores.append(confidence_score )
return dec_strs, conf_scores
def char_decode( self , sequences )-> list:
decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
return decode_strs
def bpe_decode( self , sequences )-> list:
return self.bpe_tokenizer.batch_decode(sequences )
def wp_decode( self , sequences )-> list:
decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
return decode_strs
| 237 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester ( unittest.TestCase ):
def test_cpu( self ):
'''simple docstring'''
debug_launcher(test_script.main )
def test_ops( self ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 172 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig ( PretrainedConfig ):
__A : str = "imagegpt"
__A : str = ["past_key_values"]
__A : Optional[Any] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , vocab_size=5_1_2 + 1 , n_positions=3_2 * 3_2 , n_embd=5_1_2 , n_layer=2_4 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
'''simple docstring'''
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.tie_word_embeddings = tie_word_embeddings
super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig ( OnnxConfig ):
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __snake_case ( self : Union[str, Any] , snake_case__ : "FeatureExtractionMixin" , snake_case__ : int = 1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 3_2 , snake_case__ : int = 3_2 , ):
'''simple docstring'''
lowercase :Union[str, Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase :List[str] = dict(preprocessor(images=snake_case__ , return_tensors=snake_case__ ) )
return inputs
| 172 | 1 |
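A short sketch of the config defaults above, derived from the signature: 512 pixel clusters plus one start-of-sequence token, and 32 x 32 = 1024 positions.

config = ImageGPTConfig()
print(config.vocab_size)   # 513
print(config.n_positions)  # 1024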
"""simple docstring"""
import sys
from collections import defaultdict
class Heap :
"""simple docstring"""
def __init__( self ):
self.node_position = []
def get_position( self, vertex ):
return self.node_position[vertex]
def set_position( self, vertex, pos ):
self.node_position[vertex] = pos
def top_to_bottom( self, heap, start, size, positions ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
smallest_child = 2 * start + 1
else:
smallest_child = 2 * start + 2
if heap[smallest_child] < heap[start]:
temp , tempa = heap[smallest_child], positions[smallest_child]
heap[smallest_child] , positions[smallest_child] = (
heap[start],
positions[start],
)
heap[start] , positions[start] = temp, tempa
temp = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], temp )
self.top_to_bottom(heap, smallest_child, size, positions )
def bottom_to_top( self, val, index, heap, position ):
temp = position[index]
while index != 0:
parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
self.set_position(position[parent], index )
else:
heap[index] = val
position[index] = temp
self.set_position(temp, index )
break
index = parent
else:
heap[0] = val
position[0] = temp
self.set_position(temp, 0 )
def heapify( self, heap, positions ):
start = len(heap ) // 2 - 1
for i in range(start, -1, -1 ):
self.top_to_bottom(heap, i, len(heap ), positions )
def delete_minimum( self, heap, positions ):
temp = positions[0]
heap[0] = sys.maxsize
self.top_to_bottom(heap, 0, len(heap ), positions )
return temp
def prisms_algorithm (adjacency_list ) -> list:
'''simple docstring'''
heap = Heap()
visited = [0] * len(adjacency_list )
nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
positions = []
for vertex in range(len(adjacency_list )):
distance_tv.append(sys.maxsize )
positions.append(vertex )
heap.node_position.append(vertex )
tree_edges = []
visited[0] = 1
distance_tv[0] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
nbr_tv[neighbor] = 0
distance_tv[neighbor] = distance
heap.heapify(distance_tv , positions )
for _ in range(1, len(adjacency_list )):
vertex = heap.delete_minimum(distance_tv , positions )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
visited[vertex] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(neighbor )]
):
distance_tv[heap.get_position(neighbor )] = distance
heap.bottom_to_top(
distance , heap.get_position(neighbor ) , distance_tv , positions )
nbr_tv[neighbor] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
edges_number = int(input('Enter number of edges: ').strip())
adjacency_list = defaultdict(list)
for _ in range(edges_number):
edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 17 |
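A self-contained run of `prisms_algorithm` on a small hand-built graph, bypassing the interactive input. The edge weights are arbitrary illustration values.

from collections import defaultdict

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])

print(prisms_algorithm(graph))  # [(0, 1), (1, 2), (2, 3)] -- an MST of total weight 6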
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel (ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self , *,
clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 7_68 , time_embed_dim : int , cross_attention_dim , ):
super().__init__()
self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
# parameters for additional clip time embeddings
self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
# parameters for encoder hidden states
self.clip_extra_context_tokens = clip_extra_context_tokens
self.clip_extra_context_tokens_proj = nn.Linear(
clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
image_embeddings_batch_size = image_embeddings.shape[0]
classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
image_embeddings_batch_size , -1 )
image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
batch_size = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 63 | 0 |
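A quick shape walk-through of the projection module above. The dimensions are arbitrary example values (only `clip_extra_context_tokens=4` and `clip_embeddings_dim=768` match the defaults); the batch size of 2 and sequence length of 77 are illustrative assumptions.

import torch

model = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,
    cross_attention_dim=2048,
)
hidden_states, time_embeds = model(
    image_embeddings=torch.randn(2, 768),
    prompt_embeds=torch.randn(2, 768),
    text_encoder_hidden_states=torch.randn(2, 77, 768),
    do_classifier_free_guidance=False,
)
print(hidden_states.shape, time_embeds.shape)  # torch.Size([2, 81, 2048]) torch.Size([2, 1536])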
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow ( arr : list[float] ) -> list[float]:
'''simple docstring'''
result = []
arr_size = len(arr )
for i in range(arr_size ):
next_element : float = -1
for j in range(i + 1 , arr_size ):
if arr[i] < arr[j]:
next_element = arr[j]
break
result.append(next_element )
return result
def next_greatest_element_fast ( arr : list[float] ) -> list[float]:
'''simple docstring'''
result = []
for i, outer in enumerate(arr ):
next_item : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
next_item = inner
break
result.append(next_item )
return result
def next_greatest_element ( arr : list[float] ) -> list[float]:
'''simple docstring'''
arr_size = len(arr )
stack : list[float] = []
result : list[float] = [-1] * arr_size
for index in reversed(range(arr_size ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
result[index] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
'''simple docstring'''
ask_again = True
while ask_again:
result = input(input_text )
try:
if default is not None and len(result ) == 0:
return default
return convert_value(result ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default_choice=0 ):
'''simple docstring'''
menu = BulletMenu(input_text , options )
result = menu.run(default_choice=default_choice )
return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
'''simple docstring'''
value = int(value )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode ( value ):
'''simple docstring'''
value = int(value )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend ( value ):
'''simple docstring'''
value = int(value )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
'''simple docstring'''
value = int(value )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode ( value ):
'''simple docstring'''
value = int(value )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool ( value ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def _format_usage( self , usage , actions , groups , prefix ):
usage = super()._format_usage(usage , actions , groups , prefix )
usage = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 | 0 |
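A hedged sketch of how the helpers above compose in an interactive config flow. The prompt strings are invented for illustration; only the helper names come from the code above.

use_cpu = _ask_field(
    "Do you want to run your training on CPU only? [yes/NO]: ",
    _convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)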
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_a : List[Any]= "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_a : Dict= "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_a : List[str]= "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_a : int= "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_a : str= "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval ( datasets.Metric ):
def _info (self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Value('string'),
}) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def _compute (self , predictions , references , k=[1, 10, 1_00] , num_workers=4 , timeout=3.0):
if os.getenv('HF_ALLOW_CODE_EVAL' , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.')
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list)
for task_id, (candidates, test_case) in enumerate(zip(predictions , references)):
for candidate in candidates:
test_program = candidate + '\n' + test_case
args = (test_program, timeout, task_id, completion_id[task_id])
future = executor.submit(check_correctness , *args)
futures.append(future)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(futures):
result = future.result()
results[result["task_id"]].append((result['completion_id'], result))
total , correct = [], []
for result in results.values():
result.sort()
passed = [r[1]['passed'] for r in result]
total.append(len(result))
correct.append(sum(passed))
total = np.array(total)
correct = np.array(correct)
ks = k
pass_at_k = {f"pass@{k}": estimate_pass_at_k(total , correct , k).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k ( num_samples , num_correct , k ) -> np.ndarray:
'''simple docstring'''
def estimator(n : int , c : int , k : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(num_samples , int ):
num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
else:
assert len(num_samples ) == len(num_correct )
num_samples_it = iter(num_samples )
return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
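# Worked example (hand-computed, not from the source): with n=5 samples and
# c=2 correct completions, the unbiased pass@2 estimate is
#   1 - (1 - 2/4) * (1 - 2/5) = 1 - 0.5 * 0.6 = 0.7
# so estimate_pass_at_k(np.array([5]), np.array([2]), 2) yields array([0.7]).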
| 172 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 172 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 353 |
"""simple docstring"""
import math
def check_partition_perfect (positive_integer ) -> bool:
exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def solution (max_proportion = 1 / 12_345 ) -> int:
total_partitions = 0
perfect_partitions = 0
integer = 3
while True:
partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
partition_candidate = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""") | 58 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest ( unittest.TestCase ):
@slow
def test_small_integration_test ( self ):
"""simple docstring"""
model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''')
tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
input_ids = tokenizer('''Hello there''' , return_tensors='''np''').input_ids
labels = tokenizer('''Hi I am''' , return_tensors='''np''').input_ids
decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id)
logits = model(input_ids , decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 51 |
from __future__ import annotations
class Matrix :
def __init__(self , rows : list[list[int]] ) -> None:
"""simple docstring"""
error = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(rows ) != 0:
cols = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(row ) != cols:
raise error
for value in row:
if not isinstance(value , (int, float) ):
raise error
self.rows = rows
else:
self.rows = []
def columns (self ) -> list[list[int]]:
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def num_rows (self ) -> int:
"""simple docstring"""
return len(self.rows )
@property
def num_columns (self ) -> int:
"""simple docstring"""
return len(self.rows[0] )
@property
def order (self ) -> tuple[int, int]:
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def is_square (self ) -> bool:
"""simple docstring"""
return self.order[0] == self.order[1]
def identity (self ) -> Matrix:
"""simple docstring"""
values = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(values )
def determinant (self ) -> int:
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def is_invertable (self ) -> bool:
"""simple docstring"""
return bool(self.determinant() )
def get_minor (self , row : int , column : int ) -> int:
"""simple docstring"""
values = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(values ).determinant()
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[self.get_minor(__UpperCAmelCase , __UpperCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ (self : List[str] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ (self : Optional[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__UpperCAmelCase )
def lowercase_ (self : List[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__(self : Dict ) -> str:
"""simple docstring"""
return str(self.rows )
def __str__(self : Optional[Any] ) -> str:
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__UpperCAmelCase ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None) -> None:
        """simple docstring"""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        """simple docstring"""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        """simple docstring"""
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
        """simple docstring"""
        return not self == other
def __neg__(self : Dict ) -> Matrix:
"""simple docstring"""
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        """simple docstring"""
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")
    def __pow__(self, other: int) -> Matrix:
        """simple docstring"""
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        """simple docstring"""
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
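A short usage sketch for the Matrix class above. Keep in mind that scalar __mul__ truncates with int(), so inverse() is only exact when the determinant divides every adjugate entry evenly.

m = Matrix([[1, 2], [3, 4]])
print(m.order)            # (2, 2)
print(m.determinant())    # 1*4 - 2*3 = -2
print(m.is_invertable())  # True: the determinant is non-zero
print(m.adjugate())       # [[4, -2], [-3, 1]]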
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : List[Any] = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowerCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 210 |
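The _LazyModule object above defers the heavy torch imports until an attribute is first accessed. A stripped-down sketch of the same idea using PEP 562's module-level __getattr__ (structure hypothetical, not transformers' actual implementation):

import importlib

_import_structure = {'configuration_foo': ['FooConfig']}  # hypothetical submodule -> names

def __getattr__(name):
    # import the owning submodule on first access, then cache the attribute
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f'.{module_name}', __name__)
            value = getattr(module, name)
            globals()[name] = value
            return value
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')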
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 210 | 1 |
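Note that the routine above draws two independent indices per step, which is not the textbook algorithm and does not yield a uniform distribution over permutations. The classic Fisher-Yates walks the list once from the end, swapping each position with a uniformly chosen index at or below it; a minimal sketch:

def fisher_yates_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive on both ends
        data[i], data[j] = data[j], data[i]
    return data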
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor

class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: tuple = ("DownEncoderBlock2D",), up_block_types: tuple = ("UpDecoderBlock2D",), block_out_channels: tuple = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim=None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 292 |
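The self.quantize module above (a VectorQuantizer) snaps each encoder latent to its nearest codebook vector. A minimal sketch of that nearest-neighbour lookup (shapes illustrative; the straight-through gradient trick and commitment loss are omitted):

import torch

def nearest_codebook_entry(z: torch.Tensor, codebook: torch.Tensor):
    # z: (N, D) latents, codebook: (K, D) learnable code vectors
    distances = torch.cdist(z, codebook)  # (N, K) pairwise Euclidean distances
    indices = distances.argmin(dim=1)     # closest code per latent
    return codebook[indices], indices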
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution=None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 174 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__SCREAMING_SNAKE_CASE : int = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Tuple = "albert"
def __init__( self , lowerCamelCase__=3_0_0_0_0 , lowerCamelCase__=1_2_8 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__=1_2 , lowerCamelCase__=1 , lowerCamelCase__=6_4 , lowerCamelCase__=1_6_3_8_4 , lowerCamelCase__=1 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0 , lowerCamelCase__=0 , lowerCamelCase__=5_1_2 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-12 , lowerCamelCase__=0.1 , lowerCamelCase__="absolute" , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__=3 , **lowerCamelCase__ , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCamelCase = vocab_size
_lowerCamelCase = embedding_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_hidden_groups
_lowerCamelCase = num_attention_heads
_lowerCamelCase = inner_group_num
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = classifier_dropout_prob
_lowerCamelCase = position_embedding_type
class lowerCamelCase_( A__ ):
'''simple docstring'''
@property
def snake_case__ ( self ):
if self.task == "multiple-choice":
_lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 351 |
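The defaults above encode ALBERT's two parameter-saving ideas: a small embedding_size (128) factorized away from the large hidden_size (4096), plus cross-layer sharing via num_hidden_groups=1. A quick comparison for the embedding table alone:

V, E, H = 30_000, 128, 4_096     # vocab_size, embedding_size, hidden_size defaults above
factorized = V * E + E * H       # embed into E dims, then project E -> H
unfactorized = V * H             # BERT-style direct V x H table
print(factorized, unfactorized)  # 4,364,288 vs 122,880,000 parameters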
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73 | 0 |
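With no gates before the measurement, the qubit stays in |0>, so all 1000 shots report '0'. Inserting a single Hadamard gate gives a roughly even split; a sketch using the same older qiskit.execute-style API as the file above:

def superposition_measure(shots: int = 1000):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.h(0)  # Hadamard: equal superposition of |0> and |1>
    circuit.measure(0, 0)
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)  # e.g. {'0': ~500, '1': ~500}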
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ :Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : str =XLMProphetNetTokenizer
UpperCamelCase__ : int =False
UpperCamelCase__ : Optional[int] =True
def __lowercase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : List[Any] =XLMProphetNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] ='[PAD]'
__UpperCamelCase : Union[str, Any] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(lowerCamelCase__ ) , 1012 )
def __lowercase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =XLMProphetNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCamelCase : List[str] =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__UpperCamelCase : List[str] =tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__UpperCamelCase : int =tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def __lowercase ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ='Hello World!'
__UpperCamelCase : Union[str, Any] =[35389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int ={'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 71 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 208 | 0 |
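Each segment of a straight line already has its exact length, so the approximation above is exact for affine functions at any step count, which makes a handy sanity check:

import math
# y = x from 0 to 1 has arc length sqrt(2); even a single step recovers it exactly
assert abs(line_length(lambda x: x, 0, 1, steps=1) - math.sqrt(2)) < 1e-12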
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCAmelCase ={
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def _A ( _a : Optional[Any] , _a : Tuple , _a : Optional[Any] , _a : Any=None ):
"""simple docstring"""
A = XLNetConfig.from_json_file(a__ )
A = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
A = finetuning_task
A = GLUE_TASKS_NUM_LABELS[finetuning_task]
A = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
A = finetuning_task
A = XLNetForQuestionAnswering(a__ )
else:
A = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
A = os.path.join(a__ , a__ )
A = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
UpperCAmelCase =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 361 |
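Before running a conversion like the one above, it helps to see what the TensorFlow checkpoint actually contains; load_tf_weights_in_xlnet typically walks this same variable list internally. A minimal inspection sketch (the path and variable name are hypothetical):

import tensorflow as tf

ckpt = '/path/to/xlnet/model.ckpt'  # hypothetical checkpoint path
for name, shape in tf.train.list_variables(ckpt):
    print(name, shape)
reader = tf.train.load_checkpoint(ckpt)
embedding = reader.get_tensor('model/transformer/word_embedding/lookup_table')  # illustrative name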
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase =logging.get_logger(__name__)
def _A ( _a : List[str] ):
"""simple docstring"""
if isinstance(_a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_a , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_a ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = ['''pixel_values''']
def __init__( self ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = PILImageResampling.BILINEAR ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,lowerCamelCase_ = 1 / 2_5_5 ,lowerCamelCase_ = True ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> None:
super().__init__(**lowerCamelCase_ )
A = size if size is not None else {"""shortest_edge""": 2_5_6}
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
A = do_resize
A = size
A = do_center_crop
A = crop_size
A = resample
A = do_rescale
A = rescale_factor
A = offset
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = PILImageResampling.BILINEAR ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
if "shortest_edge" in size:
A = get_resize_output_image_size(lowerCamelCase_ ,size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ )
elif "height" in size and "width" in size:
A = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> List[str]:
A = image.astype(np.floataa )
if offset:
A = image - (scale / 2)
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
A = to_numpy_array(lowerCamelCase_ )
if do_resize:
A = self.resize(image=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ )
if do_center_crop:
A = self.center_crop(lowerCamelCase_ ,size=lowerCamelCase_ )
if do_rescale:
A = self.rescale(image=lowerCamelCase_ ,scale=lowerCamelCase_ ,offset=lowerCamelCase_ )
if do_normalize:
A = self.normalize(image=lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ )
A = to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ )
return image
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,**lowerCamelCase_ ,) -> PIL.Image.Image:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = offset if offset is not None else self.offset
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
A = make_batched(lowerCamelCase_ )
A = [
[
self._preprocess_image(
image=lowerCamelCase_ ,do_resize=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,do_center_crop=lowerCamelCase_ ,crop_size=lowerCamelCase_ ,do_rescale=lowerCamelCase_ ,rescale_factor=lowerCamelCase_ ,offset=lowerCamelCase_ ,do_normalize=lowerCamelCase_ ,image_mean=lowerCamelCase_ ,image_std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,)
for img in video
]
for video in videos
]
A = {"""pixel_values""": videos}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
| 77 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase__ :
a_ =LEDConfig
a_ ={}
a_ ="""gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=4 , )-> str:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase__ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase__ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase__ = prepare_led_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = tf.concat(
[tf.zeros_like(__UpperCAmelCase )[:, :-1], tf.ones_like(__UpperCAmelCase )[:, -1:]] , axis=-1 , )
lowerCAmelCase__ = global_attention_mask
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = TFLEDModel(config=__UpperCAmelCase ).get_decoder()
lowerCAmelCase__ = inputs_dict["input_ids"]
lowerCAmelCase__ = input_ids[:1, :]
lowerCAmelCase__ = inputs_dict["attention_mask"][:1, :]
lowerCAmelCase__ = 1
# first forward pass
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Tuple=None , ) -> Dict:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
a_ =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
a_ =(
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ =True
a_ =False
a_ =False
a_ =False
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = TFLEDModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = tf.zeros_like(inputs_dict["attention_mask"] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
lowerCAmelCase__ = True
lowerCAmelCase__ = self.model_tester.seq_length
lowerCAmelCase__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__UpperCAmelCase ):
lowerCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__UpperCAmelCase ):
lowerCAmelCase__ = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ = len(__UpperCAmelCase )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
if self.is_encoder_decoder:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_decoder_attentions_output(__UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)

TOLERANCE = 1E-4
@slow
@require_tf
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
lowerCAmelCase__ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase__ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase__ = prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ = (1, 1024, 768)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
lowerCAmelCase__ = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-3 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
lowerCAmelCase__ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase__ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase__ = prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
lowerCAmelCase__ = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-3 , rtol=1E-3 )
| 340 |
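The tests above build global_attention_mask by flagging the first num_global_attn_indices positions; in practice LED summarization commonly puts global attention on just the first token. A minimal sketch mirroring that pattern:

import tensorflow as tf

input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69]])
seq_len = tf.shape(input_ids)[1]
# 1 marks globally-attending positions (here only token 0), 0 means local-only
global_attention_mask = tf.cast(tf.range(seq_len)[None, :] < 1, tf.int32)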
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    '''simple docstring'''
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 370 |
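Because YOLO-format boxes store normalized center coordinates, a horizontal flip only needs x_center -> 1 - x_center (and a vertical flip mirrors y_center), which is exactly what update_image_and_anno does above:

def flip_bbox(bbox: list, flip_type: int = 1) -> list:
    cls_id, x, y, w, h = bbox
    if flip_type == 1:                   # horizontal: mirror the x center
        return [cls_id, 1 - x, y, w, h]
    return [cls_id, x, 1 - y, w, h]      # vertical: mirror the y center

assert flip_bbox([0, 0.25, 0.4, 0.1, 0.2], 1) == [0, 0.75, 0.4, 0.1, 0.2]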
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313 | 0 |
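The numbers that matter above are num_latents=256 against inputs of up to max_position_embeddings=2048: Perceiver's input cross-attention scales with latents times inputs rather than inputs squared. Roughly:

num_latents, seq_len = 256, 2_048               # defaults from the config above
cross_attention_pairs = num_latents * seq_len   # 524,288 query-key pairs
self_attention_pairs = seq_len ** 2             # 4,194,304 pairs for full self-attention
print(self_attention_pairs / cross_attention_pairs)  # 8.0: an 8x reduction at the input layer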
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
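    # Walk the dotted attribute path (e.g. "encoder.layers.0.attention.k_proj") down to the target HF module/parameter.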
for attribute in key.split('.' ):
lowercase = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape
else:
lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase = value
elif weight_type == "weight_g":
lowercase = value
elif weight_type == "weight_v":
lowercase = value
elif weight_type == "bias":
lowercase = value
else:
lowercase = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
lowercase = fairseq_model.state_dict()
lowercase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase = None
for name, value in fairseq_dict.items():
lowercase = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
lowercase = True
elif name.split('.' )[0] == "proj":
lowercase = fairseq_model.proj
lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowercase = True
if "*" in mapped_key:
lowercase = name.split(__SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
lowercase = mapped_key.replace('*' , __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase = 'weight_g'
elif "weight_v" in name:
lowercase = 'weight_v'
elif "bias" in name:
lowercase = 'bias'
elif "weight" in name:
lowercase = 'weight'
else:
lowercase = None
set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = full_name.split('conv_layers.' )[-1]
lowercase = name.split('.' )
lowercase = int(items[0] )
lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE )
lowercase = emb.weight.data
return lin_layer
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
lowercase = f.readlines()
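        # Each line of a fairseq dict file is "<token> <count>"; only the token column is kept.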
lowercase = [line.split(' ' )[0] for line in lines]
lowercase = len(__SCREAMING_SNAKE_CASE )
lowercase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
lowercase = WavaVecaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaConfig.from_pretrained(
__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE , decoder_layers=__SCREAMING_SNAKE_CASE , do_stable_layer_norm=__SCREAMING_SNAKE_CASE )
lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase = model[0].eval()
# set weights for wav2vec2 encoder
lowercase = WavaVecaModel(__SCREAMING_SNAKE_CASE )
lowercase = recursively_load_weights_wavaveca(model.encoder , __SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaForCausalLM(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowercase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase = SpeechEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase = False
# add projection layer
lowercase = nn.Parameter(projection_layer.weight )
lowercase = nn.Parameter(projection_layer.bias )
lowercase = create_vocab_dict(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaTokenizer(os.path.join(__SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
lowercase = hf_wavavec.config.to_dict()
lowercase = tokenizer.pad_token_id
lowercase = tokenizer.bos_token_id
lowercase = tokenizer.eos_token_id
lowercase = 'speech_to_text_2'
lowercase = 'wav2vec2'
lowercase = SpeechEncoderDecoderConfig.from_dict(__SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
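# Example invocation (hedged; the script name and all paths are placeholders):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted \
#       --dict_path /path/to/dict.txt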
| 195 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
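    # Maps a line like "if not is_torch_available() and not is_vision_available():" to the backend key "torch_and_vision".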
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
            __lowercase = re.findall(r'\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
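# In the transformers repo this consistency check lives under utils/ and is wired into
# `make repo-consistency`; run standalone from the repository root it is simply:
#
#   python utils/check_inits.py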
| 325 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
_snake_case : Dict = logging.getLogger(__name__)
_snake_case : List[Any] = {"facebook/bart-base": BartForConditionalGeneration}
_snake_case : List[str] = {"facebook/bart-base": BartTokenizer}
def lowerCAmelCase_ ( ):
__snake_case : Tuple = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=__lowerCamelCase , default=__lowerCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=__lowerCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=__lowerCamelCase , default=__lowerCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=__lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCamelCase , )
parser.add_argument(
"--config_name" , type=__lowerCamelCase , default=__lowerCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=__lowerCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=__lowerCamelCase , default=__lowerCamelCase , help="Where to store the final ONNX file." )
__snake_case : Optional[int] = parser.parse_args()
return args
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase="cpu" ):
__snake_case : str = model_dict[model_name].from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
__snake_case : int = tokenizer_dict[model_name].from_pretrained(__lowerCamelCase )
if model_name in ["facebook/bart-base"]:
__snake_case : Any = 0
__snake_case : Any = None
__snake_case : List[Any] = 0
return huggingface_model, tokenizer
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
model.eval()
__snake_case : Dict = None
__snake_case : Dict = torch.jit.script(BARTBeamSearchGenerator(__lowerCamelCase ) )
with torch.no_grad():
__snake_case : Any = "My friends are cool but they eat too many carbs."
__snake_case : Optional[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="pt" ).to(model.device )
__snake_case : Optional[int] = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=__lowerCamelCase , max_length=__lowerCamelCase , early_stopping=__lowerCamelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__lowerCamelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCamelCase , opset_version=1_4 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=__lowerCamelCase , )
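    # Note (hedged): `example_outputs` only exists on older torch.onnx.export signatures;
    # the argument was removed around PyTorch 1.10, so this export path assumes an older torch.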
logger.info("Model exported to {}".format(__lowerCamelCase ) )
__snake_case : Union[str, Any] = remove_dup_initializers(os.path.abspath(__lowerCamelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(__lowerCamelCase ) )
__snake_case : Dict = onnxruntime.InferenceSession(__lowerCamelCase )
__snake_case : Any = ort_sess.run(
__lowerCamelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(__lowerCamelCase ),
"max_length": np.array(__lowerCamelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def lowerCAmelCase_ ( ):
__snake_case : Union[str, Any] = parse_args()
__snake_case : List[str] = 5
__snake_case : Optional[int] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
__snake_case : Any = torch.device(args.device )
__snake_case : str = load_model_tokenizer(args.model_name_or_path , __lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(__lowerCamelCase )
if args.max_length:
__snake_case : int = args.max_length
if args.num_beams:
__snake_case : Tuple = args.num_beams
if args.output_file_path:
__snake_case : List[Any] = args.output_file_path
else:
__snake_case : Union[str, Any] = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
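# Example invocation (hedged; the script name is an assumption):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx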
| 368 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCAmelCase )
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase : Any , **lowerCamelCase : Tuple ) -> int:
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any]=None ) -> Optional[int]:
__snake_case : Optional[Any] = {}
if top_k is not None:
__snake_case : List[Any] = top_k
return {}, {}, postprocess_params
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase : Dict ) -> Optional[int]:
return super().__call__(lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
__snake_case : Any = load_image(lowerCamelCase )
__snake_case : str = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
return model_inputs
def __snake_case ( self : int , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : List[Any] = self.model(**lowerCamelCase )
return model_outputs
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
__snake_case : int = self.model.config.num_labels
if self.framework == "pt":
__snake_case : Optional[Any] = model_outputs.logits.softmax(-1 )[0]
__snake_case , __snake_case : List[str] = probs.topk(lowerCamelCase )
elif self.framework == "tf":
__snake_case : Tuple = stable_softmax(model_outputs.logits , axis=-1 )[0]
__snake_case : Optional[Any] = tf.math.top_k(lowerCamelCase , k=lowerCamelCase )
__snake_case , __snake_case : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__snake_case : Any = scores.tolist()
__snake_case : Optional[int] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
| 134 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
A = DatasetInfosDict.from_directory(_lowerCamelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
A = str(_lowerCamelCase )
dataset_info.write_to_directory(_lowerCamelCase )
A = DatasetInfo.from_directory(_lowerCamelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_lowerCamelCase , """dataset_info.json""" ) )
def __a ( ) ->Dict:
"""simple docstring"""
A = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
A = dataset_info._to_yaml_dict()
assert sorted(_lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
A = yaml.safe_dump(_lowerCamelCase )
A = yaml.safe_load(_lowerCamelCase )
assert dataset_info_yaml_dict == reloaded
def __a ( ) ->Tuple:
"""simple docstring"""
A = DatasetInfo()
A = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = str(_lowerCamelCase )
dataset_infos_dict.write_to_directory(_lowerCamelCase )
A = DatasetInfosDict.from_directory(_lowerCamelCase )
    # the config_name of the dataset_infos_dict takes precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
A = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
A = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_lowerCamelCase , """README.md""" ) )
| 258 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :str , a :str = None , a :uuid.UUID = None , a :Tuple=None , a :Optional[Any]=None ) -> str:
if not conversation_id:
__UpperCamelCase : Dict = uuid.uuida()
if past_user_inputs is None:
__UpperCamelCase : List[Any] = []
if generated_responses is None:
__UpperCamelCase : Any = []
__UpperCamelCase : uuid.UUID = conversation_id
__UpperCamelCase : List[str] = past_user_inputs
__UpperCamelCase : List[str] = generated_responses
__UpperCamelCase : Optional[str] = text
def __eq__( self :Optional[int] , a :Optional[int] ) -> Union[str, Any]:
if not isinstance(a , a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowerCamelCase ( self :Optional[int] , a :str , a :bool = False ) -> str:
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
__UpperCamelCase : Any = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__UpperCamelCase : int = text
def _lowerCamelCase ( self :List[str] ) -> int:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__UpperCamelCase : Dict = None
def _lowerCamelCase ( self :Optional[int] , a :str ) -> Optional[int]:
self.generated_responses.append(a )
def _lowerCamelCase ( self :int ) -> Optional[Any]:
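        # Yields (is_user, text) pairs in chronological order, ending with any still-unprocessed user input.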
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self :List[str] ) -> List[Any]:
__UpperCamelCase : Any = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__UpperCamelCase : str = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowercase , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :Tuple , *a :Tuple , **a :List[str] ) -> Tuple:
super().__init__(*a , **a )
if self.tokenizer.pad_token_id is None:
__UpperCamelCase : int = self.tokenizer.eos_token
def _lowerCamelCase ( self :Optional[int] , a :List[Any]=None , a :str=None , a :int=None , **a :str ) -> List[str]:
__UpperCamelCase : List[str] = {}
__UpperCamelCase : List[str] = {}
__UpperCamelCase : str = {}
if min_length_for_response is not None:
__UpperCamelCase : Optional[Any] = min_length_for_response
if minimum_tokens is not None:
__UpperCamelCase : List[str] = minimum_tokens
if "max_length" in generate_kwargs:
__UpperCamelCase : List[Any] = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__UpperCamelCase : List[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(a )
return preprocess_params, forward_params, postprocess_params
def __call__( self :Dict , a :Union[Conversation, List[Conversation]] , a :List[Any]=0 , **a :Any ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] = super().__call__(a , num_workers=a , **a )
if isinstance(a , a ) and len(a ) == 1:
return outputs[0]
return outputs
def _lowerCamelCase ( self :Tuple , a :Conversation , a :Dict=3_2 ) -> Dict[str, Any]:
if not isinstance(a , a ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
__UpperCamelCase : str = self.tokenizer._build_conversation_input_ids(a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__UpperCamelCase : Optional[Any] = self._legacy_parse_and_tokenize(a )
if self.framework == "pt":
__UpperCamelCase : Dict = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__UpperCamelCase : Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCamelCase ( self :Any , a :List[Any] , a :Optional[Any]=1_0 , **a :Tuple ) -> List[str]:
__UpperCamelCase : Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length )
__UpperCamelCase : Dict = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__UpperCamelCase : Dict = max_length - minimum_tokens
__UpperCamelCase : Optional[int] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
__UpperCamelCase : Dict = model_inputs["attention_mask"][:, -trim:]
__UpperCamelCase : List[str] = model_inputs.pop("conversation" )
__UpperCamelCase : Optional[int] = max_length
__UpperCamelCase : str = self.model.generate(**a , **a )
if self.model.config.is_encoder_decoder:
__UpperCamelCase : List[str] = 1
else:
__UpperCamelCase : Optional[int] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCamelCase ( self :List[Any] , a :str , a :Optional[int]=True ) -> Union[str, Any]:
__UpperCamelCase : List[str] = model_outputs["output_ids"]
__UpperCamelCase : Any = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
__UpperCamelCase : int = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(a )
return conversation
def _lowerCamelCase ( self :str , a :Conversation ) -> Dict:
__UpperCamelCase : int = self.tokenizer.eos_token_id
__UpperCamelCase : Any = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) )
if len(a ) > self.tokenizer.model_max_length:
__UpperCamelCase : Union[str, Any] = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 232 | 0 |
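# A minimal usage sketch for the conversational pipeline above (the DialoGPT
# checkpoint is an illustrative assumption; any conversational model works):
#
#   from transformers import pipeline, Conversation
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, how are you today?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])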
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''yolos'''
def __init__( self :Optional[int] , __magic_name__ :Tuple=768 , __magic_name__ :Dict=12 , __magic_name__ :List[Any]=12 , __magic_name__ :str=3072 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :Dict=0.02 , __magic_name__ :Optional[Any]=1E-1_2 , __magic_name__ :Optional[int]=[512, 864] , __magic_name__ :List[Any]=16 , __magic_name__ :Any=3 , __magic_name__ :List[str]=True , __magic_name__ :Union[str, Any]=100 , __magic_name__ :Dict=True , __magic_name__ :List[str]=False , __magic_name__ :List[str]=1 , __magic_name__ :str=5 , __magic_name__ :int=2 , __magic_name__ :str=5 , __magic_name__ :int=2 , __magic_name__ :int=0.1 , **__magic_name__ :int , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = num_detection_tokens
a = use_mid_position_embeddings
a = auxiliary_loss
# Hungarian matcher
a = class_cost
a = bbox_cost
a = giou_cost
# Loss coefficients
a = bbox_loss_coefficient
a = giou_loss_coefficient
a = eos_coefficient
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
return 12
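# A minimal usage sketch (hedged; `YolosConfig` is the public name behind the
# scrambled class above):
#
#   from transformers import YolosConfig
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)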
| 347 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :List[Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :int=37 , __magic_name__ :Tuple="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :Dict=16 , __magic_name__ :Optional[int]=2 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[Any]=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
lowerCAmelCase__ = [8, 5, 9, 7]
lowerCAmelCase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCAmelCase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = claim_vector
lowerCAmelCase : str = allocated_resources_table
lowerCAmelCase : Optional[Any] = maximum_claim_table
def lowercase__ ( self ):
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowercase__ ( self ):
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowercase__ ( self ):
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowercase__ ( self ):
"""simple docstring"""
return {self.__need().index(snake_case__ ): i for i in self.__need()}
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.__need()
lowerCAmelCase : Optional[Any] = self.__allocated_resources_table
lowerCAmelCase : str = self.__available_resources()
lowerCAmelCase : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
lowerCAmelCase : str = False
for each_need in need_list:
lowerCAmelCase : Dict = True
for index, need in enumerate(snake_case__ ):
if need > available_resources[index]:
lowerCAmelCase : Optional[Any] = False
break
if execution:
lowerCAmelCase : List[Any] = True
                    # recover the process's original index via the need-index manager
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCAmelCase : Optional[Any] = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(snake_case__ )
# update available/freed resources stack
lowerCAmelCase : Union[str, Any] = np.array(snake_case__ ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(snake_case__ ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def lowercase__ ( self ):
"""simple docstring"""
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(snake_case__ ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(snake_case__ ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(snake_case__ ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(snake_case__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
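# Usage sketch (hedged; class and method names below are restored from the original
# banker's algorithm, since the identifiers above are scrambled):
#
#   BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)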
| 108 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while b:
lowerCAmelCase , lowerCAmelCase : Any = b, a % b
return a
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(SCREAMING_SNAKE_CASE , a % b )
def a__ ( ):
'''simple docstring'''
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 108 | 1 |
from collections import defaultdict
def UpperCamelCase ( _a , _a ) -> bool:
'''simple docstring'''
lowercase_ :Tuple = first_str.lower().strip()
lowercase_ :List[Any] = second_str.lower().strip()
# Remove whitespace
lowercase_ :Dict = first_str.replace(''' ''' , '''''' )
lowercase_ :Optional[int] = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(_a ) != len(_a ):
return False
# Default values for count should be 0
lowercase_ :defaultdict[str, int] = defaultdict(_a )
    # For each character in the input strings, increment the count for the first
    # string and decrement it for the second; anagrams cancel out to all zeros.
for i in range(len(_a ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
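# e.g. with the intended name restored: check_anagrams("Silent", "Listen") -> True,
# check_anagrams("apple", "pear") -> False.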
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Optional[Any] = input("Enter the first string ").strip()
SCREAMING_SNAKE_CASE : List[Any] = input("Enter the second string ").strip()
SCREAMING_SNAKE_CASE : Optional[Any] = check_anagrams(input_a, input_b)
print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 252 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE : Dict = 16
SCREAMING_SNAKE_CASE : str = 32
def UpperCamelCase ( _a ) -> Any:
'''simple docstring'''
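    # Converts a byte count to mebibytes (MiB).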
return int(x / 2**2_0 )
class UpperCamelCase :
'''simple docstring'''
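    # Context manager that snapshots CUDA memory on entry and records the delta and peak on exit.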
def __enter__( self ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase_ :List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *UpperCamelCase_ ):
gc.collect()
torch.cuda.empty_cache()
lowercase_ :Any = torch.cuda.memory_allocated()
lowercase_ :Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase_ :Optional[int] = bamb(self.end - self.begin )
lowercase_ :List[str] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCamelCase ( _a , _a = 1_6 , _a = "bert-base-cased" , _a = 3_2_0 , _a = 1_6_0 , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :Optional[Any] = AutoTokenizer.from_pretrained(_a )
lowercase_ :int = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"} )
def tokenize_function(_a ):
# max_length=None => use the model max length (it's actually the default)
lowercase_ :Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_a , max_length=_a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase_ :Tuple = datasets.map(
_a , batched=_a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase_ :int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_a , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(_a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowercase_ :Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_a , collate_fn=_a , batch_size=_a )
lowercase_ :str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_a , collate_fn=_a , batch_size=_a )
return train_dataloader, eval_dataloader
def UpperCamelCase ( _a , _a ) -> List[Any]:
'''simple docstring'''
lowercase_ :Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase_ :Dict = config['''lr''']
lowercase_ :List[Any] = int(config['''num_epochs'''] )
lowercase_ :Tuple = int(config['''seed'''] )
lowercase_ :List[str] = int(config['''batch_size'''] )
lowercase_ :Optional[Any] = args.model_name_or_path
set_seed(_a )
lowercase_ , lowercase_ :Any = get_dataloaders(_a , _a , _a , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase_ :Tuple = AutoModelForSequenceClassification.from_pretrained(_a , return_dict=_a )
# Instantiate optimizer
lowercase_ :Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase_ :str = optimizer_cls(params=model.parameters() , lr=_a )
if accelerator.state.deepspeed_plugin is not None:
lowercase_ :str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowercase_ :List[str] = 1
lowercase_ :Union[str, Any] = (len(_a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase_ :int = get_linear_schedule_with_warmup(
optimizer=_a , num_warmup_steps=0 , num_training_steps=_a , )
else:
lowercase_ :str = DummyScheduler(_a , total_num_steps=_a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ :Optional[Any] = accelerator.prepare(
_a , _a , _a , _a , _a )
# We need to keep track of how many total steps we have iterated over
lowercase_ :Dict = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase_ :int = 0
# Now we train the model
lowercase_ :str = {}
for epoch in range(_a , _a ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_a ):
lowercase_ :Optional[Any] = model(**_a )
lowercase_ :Dict = outputs.loss
lowercase_ :Dict = loss / gradient_accumulation_steps
accelerator.backward(_a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase_ :Union[str, Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(_a , _a )
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=320 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=160 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 252 | 1 |
def solution(limit: int = 50000000 ) -> int:
    '''simple docstring'''
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 176 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf' ),
            'variance_type': None,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        """simple docstring"""
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_full_uneven_loop( self ):
        """simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2574 ) < 1E-3
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_switch( self ):
        """simple docstring"""
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_thresholding( self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='dpmsolver++' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        """simple docstring"""
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_lambda_min_clipped( self ):
        """simple docstring"""
        self.check_over_configs(lambda_min_clipped=-float('inf' ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type( self ):
        """simple docstring"""
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type='learned_range' )
    def test_inference_steps( self ):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_full_loop_with_karras( self ):
        """simple docstring"""
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2248 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1453 ) < 1E-3
    def test_full_loop_with_karras_and_v_prediction( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0649 ) < 1E-3
    def test_fp16_support( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
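
# A minimal standalone sketch (not part of the test suite) of the config
# round-trip exercised by test_switch above: schedulers sharing the diffusers
# config format can be rebuilt from one another's configs.
if __name__ == "__main__":
    dpm = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
    unipc = UniPCMultistepScheduler.from_config(dpm.config)  # swap sampler, keep settings
    dpm_again = DPMSolverSinglestepScheduler.from_config(unipc.config)
    assert dpm_again.config.solver_order == 2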
| 199 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
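
# A minimal smoke test for the encoder above (hypothetical sizes chosen only
# for illustration; run manually, it is not part of the original module):
if __name__ == "__main__":
    enc = SpectrogramNotesEncoder(
        max_length=2048, vocab_size=1536, d_model=64, dropout_rate=0.1,
        num_layers=2, num_heads=4, d_kv=16, d_ff=256,
        feed_forward_proj="gated-gelu", is_decoder=False,
    )
    tokens = torch.randint(0, 1536, (1, 16))
    mask = torch.ones(1, 16, dtype=torch.long)
    out, out_mask = enc(tokens, mask)
    assert out.shape == (1, 16, 64)  # (batch, seq, d_model)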
| 352 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : int = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize( x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
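# Worked example of the trick used in squared_euclidean_distance above: for a
# row a_i of `a` and a row b_j of `b`,
#   ||a_i - b_j||**2 = ||a_i||**2 - 2 * a_i . b_j + ||b_j||**2,
# so all pairwise distances come out of a single matrix multiply.
# >>> a = np.array([[0.0, 0.0, 0.0]])
# >>> b = np.array([[1.0, 2.0, 2.0]])
# >>> squared_euclidean_distance(a, b)
# array([[9.]])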
class ImageGPTImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
def __init__( self : str , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : bool = True , __A : Dict[str, int] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : bool = True , **__A : Union[str, Any] , ):
super().__init__(**__A )
snake_case__ : Optional[int] = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
snake_case__ : List[Any] = get_size_dict(__A )
snake_case__ : Any = np.array(__A ) if clusters is not None else None
snake_case__ : Optional[Any] = do_resize
snake_case__ : Any = size
snake_case__ : List[Any] = resample
snake_case__ : List[Any] = do_normalize
snake_case__ : Dict = do_color_quantize
def _lowercase ( self : List[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : Optional[Union[str, ChannelDimension]] = None , **__A : int , ):
snake_case__ : List[Any] = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__A , size=(size["height"], size["width"]) , resample=__A , data_format=__A , **__A )
def _lowercase ( self : List[Any] , __A : np.ndarray , __A : Optional[Union[str, ChannelDimension]] = None , ):
snake_case__ : List[str] = rescale(image=__A , scale=1 / 1_2_7.5 , data_format=__A )
snake_case__ : List[Any] = image - 1
return image
def _lowercase ( self : Dict , __A : ImageInput , __A : bool = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Optional[bool] = None , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__A : Optional[int] , ):
snake_case__ : Any = do_resize if do_resize is not None else self.do_resize
snake_case__ : Union[str, Any] = size if size is not None else self.size
snake_case__ : Union[str, Any] = get_size_dict(__A )
snake_case__ : Optional[Any] = resample if resample is not None else self.resample
snake_case__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ : Union[str, Any] = clusters if clusters is not None else self.clusters
snake_case__ : Union[str, Any] = np.array(__A )
snake_case__ : Any = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ : Optional[Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
snake_case__ : List[str] = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_normalize:
snake_case__ : Union[str, Any] = [self.normalize(image=__A ) for image in images]
if do_color_quantize:
snake_case__ : int = [to_channel_dimension_format(__A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ : int = np.array(__A )
snake_case__ : Dict = color_quantize(__A , __A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ : str = images.shape[0]
snake_case__ : str = images.reshape(__A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ : Union[str, Any] = list(__A )
else:
snake_case__ : Any = [to_channel_dimension_format(__A , __A ) for image in images]
snake_case__ : Optional[int] = {"input_ids": images}
return BatchFeature(data=__A , tensor_type=__A )
| 286 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Dict = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    """simple docstring"""
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config ):
    """simple docstring"""
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
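# Worked example of normalize_box above: a 50x100-pixel word box at (10, 20)
# in a 1000x2000 image maps into the 0-1000 coordinate space LayoutLM expects:
# >>> normalize_box([10, 20, 60, 120], 1000, 2000)
# [10, 10, 60, 60]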
class LayoutLMv3ImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : float = 1 / 2_5_5 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[float, Iterable[float]] = None , UpperCamelCase__ : Union[float, Iterable[float]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "" , **UpperCamelCase__ : Any , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase = get_size_dict(UpperCamelCase__ )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_value
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCamelCase = apply_ocr
UpperCamelCase = ocr_lang
UpperCamelCase = tesseract_config
def A ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCamelCase = (size['height'], size['width'])
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, Iterable[float]] , UpperCamelCase__ : Union[float, Iterable[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Union[str, Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Union[float, Iterable[float]] = None , UpperCamelCase__ : Union[float, Iterable[float]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(UpperCamelCase__ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
UpperCamelCase = []
UpperCamelCase = []
for image in images:
UpperCamelCase , UpperCamelCase = apply_tesseract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
words_batch.append(UpperCamelCase__ )
boxes_batch.append(UpperCamelCase__ )
if do_resize:
UpperCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCamelCase__ )
if apply_ocr:
UpperCamelCase = words_batch
UpperCamelCase = boxes_batch
return data
| 28 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments:
lowerCamelCase : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
    def __post_init__( self ):
"""simple docstring"""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
    def to_json_string( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
    def model_names( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
    def do_multi_processing( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
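
# A minimal usage sketch (an assumption about typical wiring, not code from
# this module): the dataclass above is meant to be populated from the command
# line with transformers' HfArgumentParser, e.g.
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(BenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses()[0]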
| 67 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'encodec'
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24_000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""")
        super().__init__(**kwargs)
    @property
    def chunk_length( self ):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride( self ):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate( self ):
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers( self ):
        '''simple docstring'''
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
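
# Worked example of the derived properties above for the default 24 kHz
# settings (a sketch for illustration: upsampling_ratios [8, 5, 4, 2] multiply
# to a hop length of 320 samples):
#   frame_rate     = ceil(24_000 / 320) = 75 frames per second
#   num_quantizers = int(1_000 * 24.0 // (75 * 10)) = 32 codebooks at 24 kbps
if __name__ == "__main__":
    config = EncodecConfig()
    assert config.frame_rate == 75
    assert config.num_quantizers == 32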
 | 119 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_config( model_name ):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name ):
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
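# Example of the renaming above (timm key -> Hugging Face key):
# >>> rename_key("stem.conv.weight")
# 'bit.embedder.convolution.weight'
# >>> rename_key("stages.0.blocks.0.conv1.weight")
# 'bit.encoder.stages.0.layers.0.conv1.weight'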
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('Logits:' , logits[0, :3] )
    print('Predicted class:' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(F"""ybelkada/{model_name}""" )
        processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 119 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__lowerCamelCase : Optional[Any] = tuple[int, int]
class Node:
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ):
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other ):
        '''simple docstring'''
        return self.f_cost < other.f_cost
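# Worked example of calculate_heuristic above: a node at (x=0, y=0) heading
# for a goal at (x=6, y=6) gets h = sqrt(6**2 + 6**2) ≈ 8.49 under the
# euclidean setting (HEURISTIC = 0, as configured above) and |6| + |6| = 12
# under manhattan (HEURISTIC = 1).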
class AStar:
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 52 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
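
# A minimal usage sketch (kept as comments; "google/ncsnpp-celebahq-256" is a
# public score-SDE checkpoint, and full sampling is slow without a GPU):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")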
| 316 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowercase : str = logging.get_logger(__name__)
lowercase : List[Any] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
    def __init__( self , model=None , **kwargs ):
        """simple docstring"""
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        """simple docstring"""
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( model_path , provider=None , sess_options=None ):
        """simple docstring"""
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(model_path , providers=[provider] , sess_options=sess_options )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_snake_case = self.model_save_dir.joinpath(self.latest_model_name )
_snake_case = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_snake_case = self.model_save_dir.joinpath(lowerCAmelCase_ )
if src_path.exists():
_snake_case = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
def lowerCamelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ , ):
"""simple docstring"""
if os.path.isfile(lowerCAmelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowerCamelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase_ ):
_snake_case = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
_snake_case = Path(lowerCAmelCase_ )
# load model from hub
else:
# download model
_snake_case = hf_hub_download(
repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , )
_snake_case = Path(lowerCAmelCase_ ).parent
_snake_case = Path(lowerCAmelCase_ ).name
_snake_case = OnnxRuntimeModel.load_model(lowerCAmelCase_ , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
return cls(model=lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowerCamelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = None
if len(str(lowerCAmelCase_ ).split('@' ) ) == 2:
_snake_case , _snake_case = model_id.split('@' )
return cls._from_pretrained(
model_id=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
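
# A minimal usage sketch (hypothetical local path; assumes a model.onnx was
# exported to that directory beforehand):
#
#   model = OnnxRuntimeModel.from_pretrained("./onnx_model", provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))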
| 359 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            """simple docstring"""
            pass
def hashimage( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        """simple docstring"""
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
_snake_case = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_56 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
model_id = 'facebook/sam-vit-huge'
image_segmenter = pipeline('mask-generation' , model=model_id )
outputs = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
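# Hedged usage sketch of the `mask-generation` pipeline exercised by this test;
# running it for real downloads the large SAM checkpoint, so treat it as
# illustrative rather than something to execute casually.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
result = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
# `result` holds parallel lists: one binary mask per detected region plus a
# quality score, which the test condenses into (hash, shape) pairs above.
for mask, score in zip(result["masks"], result["scores"]):
    print(type(mask), float(score))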
| 160 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest ( SchedulerCommonTest ):
scheduler_classes = (DDPMParallelScheduler,)
def get_scheduler_config ( self , **_lowerCamelCase ):
config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_lowerCamelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = self.scheduler_classes[0]
a :Optional[Any] = self.get_scheduler_config()
a :Any = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.scheduler_classes[0]
a :Any = self.get_scheduler_config()
a :Union[str, Any] = scheduler_class(**_lowerCamelCase )
a :Dict = len(_lowerCamelCase )
a :Tuple = self.dummy_model()
a :Optional[Any] = self.dummy_sample_deter
a :str = self.dummy_sample_deter + 0.1
a :Optional[Any] = self.dummy_sample_deter - 0.1
a :int = samplea.shape[0]
a :List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
a :List[str] = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
a :str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
a :List[Any] = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
a :str = torch.sum(torch.abs(_lowerCamelCase ) )
a :str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.scheduler_classes[0]
a :Any = self.get_scheduler_config()
a :Dict = scheduler_class(**_lowerCamelCase )
a :Tuple = len(_lowerCamelCase )
a :Optional[int] = self.dummy_model()
a :Optional[Any] = self.dummy_sample_deter
a :Optional[int] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
a :Dict = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a :int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
a :Any = pred_prev_sample
a :int = torch.sum(torch.abs(_lowerCamelCase ) )
a :Dict = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.scheduler_classes[0]
a :List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
a :Any = scheduler_class(**_lowerCamelCase )
a :Tuple = len(_lowerCamelCase )
a :str = self.dummy_model()
a :List[str] = self.dummy_sample_deter
a :Any = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a :str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
a :List[str] = pred_prev_sample
a :Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) )
a :Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.scheduler_classes[0]
a :str = self.get_scheduler_config()
a :Tuple = scheduler_class(**_lowerCamelCase )
a :int = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
a :List[str] = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
a :str = -1
else:
a :List[str] = timesteps[i + 1]
a :Union[str, Any] = scheduler.previous_timestep(_lowerCamelCase )
a :List[str] = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = self.scheduler_classes[0]
a :List[Any] = self.get_scheduler_config()
a :Dict = scheduler_class(**_lowerCamelCase )
a :int = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.scheduler_classes[0]
a :Any = self.get_scheduler_config()
a :str = scheduler_class(**_lowerCamelCase )
a :Tuple = [100, 87, 50, 1, 0]
a :Optional[Any] = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = self.scheduler_classes[0]
a :Optional[Any] = self.get_scheduler_config()
a :Dict = scheduler_class(**_lowerCamelCase )
a :int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
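# Hedged sketch of the custom-timesteps contract the last three tests pin down:
# the list must be strictly descending, cannot be combined with
# `num_inference_steps`, and `previous_timestep` walks it down to -1 at the end.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps.tolist())  # [100, 87, 50, 1, 0]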
| 94 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = 8
# DPR tok
__UpperCamelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) )
__UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase = {'unk_token': '<unk>'}
__UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def _lowerCamelCase ( self : Tuple ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Optional[int] ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowerCamelCase ( self : Any , __A : bool ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' )
__UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def _lowerCamelCase ( self : int ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__A , open(__A , 'wb' ) )
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Optional[Any] ):
import torch
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__UpperCamelCase = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
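# Hedged, minimal reconstruction of the retrieval setup the tests above exercise:
# a datasets.Dataset with an inner-product FAISS index over `embeddings`, where an
# all-ones query ranks doc "1" (all twos) first -- exactly the doc_ids [[1], [0]]
# ordering the assertions check.
import numpy as np
import faiss
from datasets import Dataset

dim = 8
ds = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "title": ["Foo", "Bar"],
        "embeddings": [np.ones(dim), 2 * np.ones(dim)],
    }
)
ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, docs = ds.get_nearest_examples("embeddings", np.ones(dim, dtype=np.float32), k=1)
print(docs["id"])  # ['1']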
| 53 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : str =logging.get_logger(__name__)
lowerCAmelCase__ : str ={'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ : str ={
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase__ : Any ={
'''camembert-base''': 512,
}
lowerCAmelCase__ : Optional[int] ='''▁'''
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = VOCAB_FILES_NAMES
UpperCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , _A , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=["<s>NOTUSED", "</s>NOTUSED"] , _A = None , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
__SCREAMING_SNAKE_CASE = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>).
__SCREAMING_SNAKE_CASE = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
__SCREAMING_SNAKE_CASE = len(self.fairseq_tokens_to_ids )
__SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _A ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self , _A , _A = None , _A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _A ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self , _A ):
'''simple docstring'''
return self.sp_model.encode(_A , out_type=_A )
def _A ( self , _A ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_A ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_A )
def _A ( self , _A ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(_A )
__SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , 'wb' ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
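# Hedged sketch of the id-offset scheme the tokenizer above implements: a few
# fairseq control tokens own the lowest ids, and every sentencepiece id is
# shifted by `fairseq_offset` so the two vocabularies never collide; the helper
# below is illustrative, not part of the class.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_model_id(sp_id):
    # sentencepiece reserves id 0 for its own <unk>; map it to the fairseq <unk>
    return fairseq_tokens_to_ids["<unk>"] if sp_id == 0 else sp_id + fairseq_offset

assert sp_id_to_model_id(0) == 3 and sp_id_to_model_id(10) == 14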
| 118 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : List[str] ={
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] =[
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
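# A simplified sketch of what the `_LazyModule` indirection above buys: attribute
# access triggers the real import, so importing the package stays cheap and the
# torch-only symbols fail lazily. This is an illustrative reimplementation, not
# the actual `transformers.utils._LazyModule`.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the submodule that exports `attr`, import it, and forward
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)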
| 118 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint ( checkpoint_path , pytorch_dump_folder_path ) -> None:
d = torch.load(checkpoint_path )
d[NEW_KEY] = d.pop(OLD_KEY )
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
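# The whole conversion above is one state-dict key rename plus a save; a minimal
# self-contained check of that idea, with a plain dict standing in for the pickle:
state_dict = {"lm_head.decoder.weight": [1.0, 2.0], "transformer.wte.weight": [0.0]}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state_dict and "lm_head.weight" in state_dict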
| 69 | """simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCamelCase = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__UpperCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCamelCase = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__UpperCamelCase = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = None
# source code of `config_class`
snake_case_ = inspect.getsource(UpperCAmelCase )
snake_case_ = _re_checkpoint.findall(UpperCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
snake_case_ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
snake_case_ = ckpt_name
break
return checkpoint
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ = get_checkpoint_from_config_class(UpperCAmelCase )
snake_case_ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
snake_case_ = '\n'.join(sorted(UpperCAmelCase ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
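# Hedged demo of the checkpoint-extraction regex defined above, run on a
# synthetic docstring fragment:
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_checkpoint.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]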
| 69 | 1 |
'''simple docstring'''
from __future__ import annotations
def find_max ( nums : list[int | float] , left : int , right : int ) -> int | float:
"""simple docstring"""
if len(nums ) == 0:
raise ValueError("""find_max() arg is an empty sequence""" )
if (
left >= len(nums )
or left < -len(nums )
or right >= len(nums )
or right < -len(nums )
):
raise IndexError("""list index out of range""" )
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
left_max = find_max(nums , left , mid ) # find max in range[left, mid]
right_max = find_max(nums , mid + 1 , right ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
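# A worked trace of the divide-and-conquer recursion above on [1, 9, 4]:
# find_max(nums, 0, 2) -> mid = 1 -> max(find_max(nums, 0, 1), find_max(nums, 2, 2))
#                      -> max(max(1, 9), 4) -> 9
print(find_max([1, 9, 4], 0, 2))  # 9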
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt_bigcode'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str]=5_02_57 , lowerCAmelCase_ : str=10_24 , lowerCAmelCase_ : str=7_68 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]="gelu_pytorch_tanh" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=1e-5 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=5_02_56 , lowerCAmelCase_ : Dict=5_02_56 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , **lowerCAmelCase_ : Optional[Any] , ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =vocab_size
A__ : Optional[Any] =n_positions
A__ : List[str] =n_embd
A__ : str =n_layer
A__ : Optional[int] =n_head
A__ : Optional[int] =n_inner
A__ : int =activation_function
A__ : int =resid_pdrop
A__ : int =embd_pdrop
A__ : Dict =attn_pdrop
A__ : Any =layer_norm_epsilon
A__ : List[Any] =initializer_range
A__ : Dict =scale_attn_weights
A__ : Any =use_cache
A__ : List[Any] =attention_softmax_in_fpaa
A__ : Optional[int] =scale_attention_softmax_in_fpaa
A__ : Dict =multi_query
A__ : List[str] =bos_token_id
A__ : Any =eos_token_id
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
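# Hedged usage sketch of the config above via its public transformers name; the
# `attribute_map` lets the canonical names alias the GPT-2-style fields.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12, multi_query=True)
print(config.hidden_size, config.num_hidden_layers)  # 768 12, via attribute_map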
| 136 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args ( ) -> argparse.Namespace:
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=UpperCAmelCase_ , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=UpperCAmelCase_ , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=UpperCAmelCase_ , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=UpperCAmelCase_ , default=0 , help="""cuda_id.""" , )
args = parser.parse_args()
return args
def image_grid ( imgs , rows , cols ):
"""simple docstring"""
if not len(imgs) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""")
w, h = imgs[0].size
grid = Image.new("""RGB""" , size=(cols * w, rows * h))
grid_w, grid_h = grid.size
for i, img in enumerate(imgs):
grid.paste(img , box=(i % cols * w, i // cols * h))
return grid
def generate_images ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
"""simple docstring"""
generator = torch.Generator(pipeline.device).manual_seed(seed)
images = pipeline(
prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
_rows = int(math.sqrt(num_images_per_prompt))
grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows)
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker for this evaluation script
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
unet = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
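# Hedged notes on driving the script above; the file name is a placeholder:
#   python evaluate_int8_sd.py -m ./sd-model-dir -c "robotic cat with wings" -n 4
# If `best_model.pt` exists, the neural-compressor INT8 UNet is loaded; otherwise
# the FP32 UNet runs on the chosen CUDA device. The grid layout follows
# `generate_images`: rows = floor(sqrt(n)), cols = n // rows.
import math

n = 4
rows = int(math.sqrt(n))
print(rows, n // rows)  # 2 2 -> a 2 x 2 grid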
| 170 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array( array , k ):
if len(array ) < k or k < 0:
raise ValueError('Invalid Input' )
current_sum = sum(array[:k] )
max_sum = current_sum
for i in range(len(array ) - k ):
current_sum = current_sum - array[i] + array[i + k]
max_sum = max(max_sum , current_sum )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase__ = [randint(-1000, 1000) for i in range(100)]
lowercase__ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
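# A worked check of the sliding-window update above: each step drops array[i]
# and adds array[i + k], so no window is re-summed from scratch.
# array = [3, 1, 4, 1, 5], k = 2 -> window sums 4, 5, 5, 6 -> answer 6
print(max_sum_in_array([3, 1, 4, 1, 5], 2))  # 6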
| 151 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , )-> Tuple:
lowerCamelCase_ =size if size is not None else {'shortest_edge': 18}
lowerCamelCase_ =crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =min_resolution
lowerCamelCase_ =max_resolution
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean
lowerCamelCase_ =image_std
def _snake_case ( self )-> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase):
_UpperCamelCase:int = LevitImageProcessor if is_vision_available() else None
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =LevitImageProcessingTester(self )
@property
def _snake_case ( self )-> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self )-> int:
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_center_crop""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _snake_case ( self )-> str:
pass
def _snake_case ( self )-> List[Any]:
# Initialize image_processing
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self )-> List[Any]:
# Initialize image_processing
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self )-> int:
# Initialize image_processing
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase_ =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
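# Hedged usage sketch of the processor under test: resize so the shortest edge is
# 18, center-crop to 18 x 18, then normalize; the shape below assumes a
# channels-first uint8 input, which the processor should detect automatically.
import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])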
| 351 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
def _snake_case ( self )-> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="auto" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=500 , _SCREAMING_SNAKE_CASE="gpt2-large" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=25 , )-> List[str]:
lowerCamelCase_ =compute_mauve(
p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , )
return out
| 49 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , __lowerCAmelCase=[2, 2, 3, 2] , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=["stage2", "stage3", "stage4"] , __lowerCAmelCase=[2, 3, 4] , __lowerCAmelCase=None , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = num_stages
lowerCamelCase__ = hidden_sizes
lowerCamelCase__ = depths
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_labels
lowerCamelCase__ = initializer_range
lowerCamelCase__ = out_features
lowerCamelCase__ = out_indices
lowerCamelCase__ = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = ConvNextModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = ConvNextForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = ConvNextBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(__lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__ = None
lowerCamelCase__ = ConvNextBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image

@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
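

# A minimal standalone sketch (not part of the original test suite) of the shape
# contract the tests above assert: ConvNext's stem downsamples by 4x and each of
# the three later stages by 2x, so the last hidden state is (B, C, H // 32, W // 32).
# Assumes `torch` and `transformers` are installed; the config values below are
# illustrative, not the tester's defaults.
if __name__ == "__main__":
    import torch
    from transformers import ConvNextConfig, ConvNextModel

    demo_config = ConvNextConfig(hidden_sizes=[16, 32, 64, 128], depths=[1, 1, 1, 1], num_stages=4)
    demo_model = ConvNextModel(demo_config).eval()
    with torch.no_grad():
        demo_out = demo_model(torch.randn(1, 3, 64, 64))
    # stem: 64 // 4 = 16, then three stride-2 stages: 16 -> 8 -> 4 -> 2
    assert demo_out.last_hidden_state.shape == (1, 128, 2, 2)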
| 209 |
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
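

# For example, floats_list((2, 3)) yields a 2x3 nested list of uniform floats in
# [0, 1), e.g. [[0.64..., 0.08..., 0.23...], [0.46..., 0.93..., 0.51...]] (exact
# values depend on the module-level RNG); scale=100.0 widens the range to [0, 100).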
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
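
    # The helper above encodes the contract of `do_normalize=True`: utterance-level
    # cepstral mean and variance normalization, i.e. each mel-bin column of the
    # un-padded frames should end up with (approximately) zero mean and unit variance.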
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # compare against the first 24 coefficients of the first frame (24 values above)
        self.assertTrue(np.allclose(input_features[0, 0, :24], expected, atol=1e-4))
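

# A standalone sketch (not part of the original tests) of the extractor call the
# tests above exercise. The trailing dimension of 24 is the number of mel bins;
# frames advance one per 10 ms hop at 16 kHz. Assumes `transformers` and
# `torchaudio` are installed; the one-second random waveform is only illustrative.
if __name__ == "__main__":
    import numpy as np
    from transformers import Speech2TextFeatureExtractor

    demo_extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24, sampling_rate=16_000)
    demo_waveform = np.random.randn(16_000).astype(np.float32)  # ~1 s of noise
    demo_features = demo_extractor(demo_waveform, sampling_rate=16_000, return_tensors="np").input_features
    print(demo_features.shape)  # roughly (1, 98, 24): 25 ms windows with a 10 ms hop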
| 209 | 1 |
# Project Euler problem 145: https://projecteuler.net/problem=145
# A number n is "reversible" if every digit of n + reverse(n) is odd
# (leading zeros are not allowed in n or reverse(n)).
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of the given length by filling digit pairs from
    the outside in, carrying the running sum in `remainder`."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count all reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
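

# Cross-check sketch (not in the original solution): a direct brute force over
# candidates, straight from the problem statement. n is reversible when n has no
# trailing zero and every digit of n + reverse(n) is odd. Handy for validating
# small lengths, e.g. brute_force_count(3) == solution(3) == 120.
def brute_force_count(max_power: int = 3) -> int:
    count = 0
    for n in range(1, 10**max_power):
        if n % 10 == 0:  # reverse(n) would have a leading zero
            continue
        if all(int(c) % 2 == 1 for c in str(n + int(str(n)[::-1]))):
            count += 1
    return count
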
if __name__ == "__main__":
    print(f"{solution() = }")
| 370 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    # Rewrite "<name>.<index>" segments as "<name>_<index>" to match Flax naming.
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
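

# For example, rename_key("down_blocks.0.resnets.1.conv1.weight") returns
# "down_blocks_0.resnets_1.conv1.weight": every "<name>.<digits>" segment becomes
# "<name>_<digits>", matching how Flax flattens module lists into parameter names.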
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape
    the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
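

# Layout note: PyTorch stores conv kernels as (out_channels, in_channels, kh, kw)
# while Flax expects (kh, kw, in_channels, out_channels), hence transpose(2, 3, 1, 0)
# above; dense kernels only need a plain transpose from (out, in) to (in, out).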
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 256 | 0 |