code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import requests
def __snake_case ( _UpperCAmelCase ):
    """Fetch one Hacker News item by id and return its decoded JSON dict.

    _UpperCAmelCase: numeric Hacker News story/item id.
    """
    # BUG FIX: the original interpolated the undefined name `story_id` and then
    # fetched the raw id instead of the URL it had just built.
    url = f'https://hacker-news.firebaseio.com/v0/item/{_UpperCAmelCase}.json?print=pretty'
    return requests.get(url).json()
def __snake_case ( _UpperCAmelCase = 10 ):
    """Return the JSON dicts of the current top Hacker News stories.

    _UpperCAmelCase: maximum number of stories to fetch (default 10).
    """
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    # BUG FIX: the original fetched the story count as a URL and sliced with the
    # undefined name `max_stories`; it also passed the count (not each id) to
    # the per-story fetcher.
    story_ids = requests.get(url).json()[:_UpperCAmelCase]
    # NOTE(review): `get_hackernews_story` is not defined in this file (the
    # fetcher above was mangled to `__snake_case`) — confirm against upstream.
    return [get_hackernews_story(story_id) for story_id in story_ids]
def __snake_case ( _UpperCAmelCase = 10 ):
    """Render the top Hacker News stories as a markdown bullet list.

    _UpperCAmelCase: maximum number of stories to include (default 10).
    """
    # NOTE(review): `hackernews_top_stories` is not defined in this file (the
    # function above was mangled to `__snake_case`) — confirm against upstream.
    stories = hackernews_top_stories(_UpperCAmelCase)
    # BUG FIX: the original formatted with `**_UpperCAmelCase` (the story count)
    # instead of each story dict's `title`/`url` fields.
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)
if __name__ == "__main__":
# NOTE(review): `hackernews_top_stories_as_markdown` is never defined in this
# file — every function above was renamed to `__snake_case` — so this guard
# raises NameError as written; confirm intended name against upstream.
print(hackernews_top_stories_as_markdown())
| 705 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Convert an original CvT checkpoint into HF Transformers format and save
# model + image processor to a dump folder.
# NOTE(review): obfuscation damage — all four parameters share one name
# (a SyntaxError in Python), every local is bound to `__a`, and later lines
# read names that are never defined (`num_labels`, `idalabel`, `cvt_model`,
# `config`, `model`, `image_processor`, plus the helper names `cls_token`,
# `embeddings`, `attention`, `final`). Reconstruct against the upstream
# transformers convert_cvt_original_pytorch_checkpoint_to_pytorch.py.
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = '''huggingface/label-files'''
__a = num_labels
# presumably loads the id->label mapping from the HF hub dataset repo.
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__a = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
__a = [2, 2, 20]
__a = [3, 12, 16]
__a = [192, 768, 1024]
__a = CvtForImageClassification(_UpperCAmelCase )
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__a = image_size
# Load the original checkpoint on CPU, then build the renamed state dict.
__a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(_UpperCAmelCase )
__a = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
# Copy each original tensor under its new HF key, then save everything.
for i in range(len(_UpperCAmelCase ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
# CLI entry point for the CvT conversion script.
# NOTE(review): the parser is bound to `__snake_case` but used as `parser`,
# and `convert_cvt_checkpoint` is never defined under that name (the function
# above was mangled to `__snake_case`) — both raise NameError as written.
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
# presumably a Windows-style path to the original checkpoint file.
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 60 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class _A ( unittest.TestCase ):
# Helper that builds a BertConfig plus random inputs for the Flax BERT tests.
# NOTE(review): obfuscation damage — every `__init__` parameter shares the
# name `__SCREAMING_SNAKE_CASE` (a SyntaxError) while the body reads the
# original names (`parent`, `batch_size`, ...); locals in the other methods
# are bound to `__a` but read back as `input_ids`, `config_and_inputs`, etc.
# Reconstruct against the upstream FlaxBertModelTester before use.
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=13 , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : int=99 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , ):
'''simple docstring'''
# Store the tester configuration on the instance (attribute assignments
# were mangled to a throwaway local here).
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def _lowerCamelCase ( self : int):
'''simple docstring'''
# Build random input ids, optional attention mask / token type ids, and
# a BertConfig; returns (config, input_ids, token_type_ids, attention_mask).
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__a = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
# Pack the prepared config and inputs into the dict shape the common
# Flax model tests expect.
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self : int):
'''simple docstring'''
# Variant for decoder tests: adds encoder hidden states and an encoder
# attention mask for cross-attention.
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( __UpperCAmelCase ,unittest.TestCase ):
# Flax BERT model test suite driven by the common FlaxModelTesterMixin.
# NOTE(review): the first base class `__UpperCAmelCase` is never defined in
# this file (presumably FlaxModelTesterMixin), and `FlaxBertModelTester`
# below does not exist under that name here — confirm against upstream.
UpperCamelCase__ : str = True
UpperCamelCase__ : Dict = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : int):
'''simple docstring'''
# Set up the shared model tester used by the mixin's tests.
__a = FlaxBertModelTester(self)
@slow
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
# Smoke test: load the pretrained checkpoint and run a 1x1 input.
__a = FlaxBertModel.from_pretrained('''bert-base-cased''')
__a = model(np.ones((1, 1)))
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
| 706 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( _UpperCAmelCase ):
    """Preprocess a PIL image for the super-resolution pipeline.

    Resizes the image down to the nearest multiple of 32 in each dimension,
    converts to a float32 NCHW torch tensor, and rescales to [-1, 1].

    _UpperCAmelCase: a PIL.Image.Image instance.
    """
    # BUG FIX: the original read the undefined names `w`, `h` and `image`
    # (all its locals were bound to a throwaway name) and used the
    # nonexistent dtype `np.floataa` instead of `np.float32`.
    w, h = _UpperCAmelCase.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = _UpperCAmelCase.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    # HWC -> NCHW with a leading batch dimension of 1.
    image = image[None].transpose(0 , 3 , 1 , 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _A ( __UpperCAmelCase ):
# Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).
# NOTE(review): obfuscation damage — the base class `__UpperCAmelCase` is
# undefined (presumably DiffusionPipeline), and in __call__ every local is
# bound to `__a` while later lines read the original names (`image`,
# `batch_size`, `height`, `width`, `latents`, `eta`, `accepts_eta`, ...).
# Reconstruct against the upstream LDMSuperResolutionPipeline before use.
def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE)
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
# Determine the batch size from the input type.
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = 1
elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
__a = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}')
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = preprocess(__SCREAMING_SNAKE_CASE)
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters()).dtype
# Sample the initial latent noise and move the image to the model device.
__a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE)
__a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(__SCREAMING_SNAKE_CASE):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1)
__a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# predict the noise residual
__a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample
# Denormalize from [-1, 1] back to [0, 1] and move to NHWC numpy.
__a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0)
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
| 60 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the InstructBLIP model package.
# NOTE(review): obfuscation damage — the import-structure dict and the
# torch-only model list are bound to `__snake_case` (each binding clobbering
# the previous one), yet `_LazyModule` below is passed `_import_structure`,
# which is never defined; confirm against the upstream __init__.py.
__snake_case :List[str] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
# Register the modeling module only when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Union[str, Any] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
# Under static type checking import everything eagerly; at runtime replace
# this module with a _LazyModule that imports on attribute access.
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__snake_case :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Type variables for the generic skip-list key/value types.
# BUG FIX: both were bound to a throwaway name while the classes below
# subscript `Generic[KT, VT]`, which raised NameError.
KT = TypeVar('''KT''')
VT = TypeVar('''VT''')
class _A ( Generic[KT, VT] ):
    """Skip-list node holding a key, a value and per-level forward links."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        '''simple docstring'''
        # BUG FIX: the original declared both parameters with the same mangled
        # name (a SyntaxError) and assigned to a throwaway local instead of
        # instance attributes, so `__repr__` and the level property crashed.
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; empty for a fresh node.
        self.forward: list[_A[KT, VT]] = []

    def __repr__(self : Dict):
        '''simple docstring'''
        return F'Node({self.key}: {self.value})'

    @property
    def _lowerCamelCase(self : Tuple):
        '''simple docstring'''
        # Number of levels this node participates in.
        return len(self.forward)

    # Backward-compatible alias: the skip-list code in this file reads
    # `node.level`, which the mangling had renamed away.
    level = _lowerCamelCase
class _A ( Generic[KT, VT] ):
    """Probabilistic skip list with expected O(log n) search/insert/delete.

    NOTE(review): the obfuscated original defined five different methods all
    under the single name `_lowerCamelCase` (so only the last survived) while
    every caller in this file uses `insert`, `delete`, `find`, `_locate_node`
    and `random_level`; those names are restored here.  `Node` below must
    refer to the node class defined above (itself mangled to `_A`) — confirm.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        '''simple docstring'''
        # Sentinel head node; its forward list grows as the list level grows.
        self.head = Node[KT, VT]()
        self.level = 0
        # Probability of promoting a node one level up, and the level cap.
        self.p = p
        self.max_level = max_level

    def __str__(self : Union[str, Any]):
        '''simple docstring'''
        # ASCII diagram of the list: one row per node plus its level columns.
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size, '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size, '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward
        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)

    def __iter__(self : int):
        '''simple docstring'''
        # Walk the bottom (level-0) chain, yielding keys in sorted order.
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self):
        """Draw a geometric level in [1, max_level] with promotion prob p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node_with_key_or_None, update_vector).

        update_vector[i] is the rightmost node at level i whose key is
        strictly less than `key` — the node whose forward link must be
        rewired on insert/delete.
        """
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # we appended in reverse level order
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        return None, update_vector

    def delete(self, key: KT):
        """Remove `key` from the list if present; no-op otherwise."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` with `value`, overwriting the value if key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
            return
        level = self.random_level()
        if level > self.level:
            # After a level increase, additional links start from the head.
            for _ in range(self.level - 1, level):
                update_vector.append(self.head)
            self.level = level
        new_node = Node(key, value)
        for i, update_node in enumerate(update_vector[:level]):
            # Splice the new node into each level's chain.
            if update_node.level > i:
                new_node.forward.append(update_node.forward[i])
            if update_node.level < i + 1:
                update_node.forward.append(new_node)
            else:
                update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under `key`, or None when absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def __snake_case ( ):
    """Insert four keys and verify all are reachable on the level-0 chain."""
    # BUG FIX: the original bound the list, the walked node and its value all
    # to throwaway locals, so the dict was never populated.
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 3 )
    skip_list.insert('''Key2''' , 12 )
    skip_list.insert('''Key3''' , 41 )
    skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def __snake_case ( ):
    """Re-inserting an existing key must override its value, not duplicate it."""
    # BUG FIX: restored the broken local bindings (`skip_list`, `node`,
    # `all_values`) that the obfuscation replaced with throwaway names.
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 10 )
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''Key5''' , 7 )
    skip_list.insert('''Key7''' , 10 )
    skip_list.insert('''Key10''' , 5 )
    skip_list.insert('''Key7''' , 7 )
    skip_list.insert('''Key5''' , 5 )
    skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def __snake_case ( ):
    """Searching an empty skip list returns None."""
    # BUG FIX: the list was bound to a throwaway local but read as `skip_list`.
    skip_list = SkipList()
    assert skip_list.find('''Some key''' ) is None
def __snake_case ( ):
    """find() returns stored values, honours overrides, None for absent keys."""
    # BUG FIX: the list was bound to a throwaway local but read as `skip_list`.
    skip_list = SkipList()
    skip_list.insert('''Key2''' , 20 )
    assert skip_list.find('''Key2''' ) == 20
    skip_list.insert('''Some Key''' , 10 )
    skip_list.insert('''Key2''' , 8 )
    skip_list.insert('''V''' , 13 )
    assert skip_list.find('''Y''' ) is None
    assert skip_list.find('''Key2''' ) == 8
    assert skip_list.find('''Some Key''' ) == 10
    assert skip_list.find('''V''' ) == 13
def __snake_case ( ):
    """Deleting from an empty skip list is a no-op."""
    # BUG FIX: the list was bound to a throwaway local but read as `skip_list`.
    skip_list = SkipList()
    skip_list.delete('''Some key''' )
    assert len(skip_list.head.forward ) == 0
def __snake_case ( ):
    """Deleted keys must no longer be found."""
    # BUG FIX: the list was bound to a throwaway local but read as `skip_list`.
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 14 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''V''' )
    skip_list.delete('''Key2''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
    """delete() removes only the given key and leaves all others intact."""
    # BUG FIX: the list was bound to a throwaway local but read as `skip_list`.
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 14 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''V''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) == 14
    assert skip_list.find('''Key1''' ) == 12
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''X''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) == 12
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''Key1''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) is None
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''Key2''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) is None
    assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
    """After deleting a node, no forward link anywhere may still reach it."""
    # BUG FIX: the list was bound to a throwaway local but read as
    # `skip_list`, and the traversal helper read the undefined name `node`
    # instead of its own parameter.
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 142 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''X''' )

    def traverse_keys(node):
        # Yield this node's key and recurse through every forward link.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head + the three remaining keys = 4 distinct reachable keys.
    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __snake_case ( ):
    """Iterating the skip list always yields keys in sorted order."""

    def is_sorted(lst):
        # True when every adjacent pair is non-decreasing.
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    # BUG FIX: restored the broken locals — the original inserted the
    # undefined `_UpperCAmelCase` instead of the loop index and listed an
    # undefined name instead of the skip list itself.
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def __snake_case ( ):
# Run the whole skip-list test battery repeatedly, since level choice is
# probabilistic (random values == random bugs).
# NOTE(review): every test function above was mangled to `__snake_case`,
# so none of the names called below exist in this file — each call raises
# NameError as written; confirm intended names against the upstream module.
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __snake_case ( ):
    """Small demo: build a list with duplicate keys, delete one, print it."""
    # BUG FIX: the list was bound to a throwaway local but read as
    # `skip_list`, and the final print referenced an undefined name.
    skip_list = SkipList()
    skip_list.insert(2 , '''2''' )
    skip_list.insert(4 , '''4''' )
    skip_list.insert(6 , '''4''' )
    skip_list.insert(4 , '''5''' )
    skip_list.insert(8 , '''4''' )
    skip_list.insert(9 , '''4''' )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
# NOTE(review): `main` is never defined in this file (the demo above was
# mangled to `__snake_case`) — this call raises NameError as written.
main()
| 60 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Model-size -> num_hidden_layers mapping for RWKV checkpoints.
# NOTE(review): both dicts are bound to `__snake_case` (the second clobbers
# the first) while the conversion code below reads
# `NUM_HIDDEN_LAYERS_MAPPING` and `HIDEN_SIZE_MAPPING` — neither exists as
# written; confirm against the upstream conversion script.
__snake_case :str = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
# Model-size -> hidden_size mapping for RWKV checkpoints.
__snake_case :Optional[int] = {
'''169M''': 768,
'''430M''': 1024,
'''1B5''': 2048,
'''3B''': 2560,
'''7B''': 4096,
'''14B''': 5120,
}
def __snake_case ( _UpperCAmelCase ):
__a = list(state_dict.keys() )
for name in state_dict_keys:
__a = state_dict.pop(_UpperCAmelCase )
# emb -> embedding
if name.startswith('''emb.''' ):
__a = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
__a = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
__a = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , _UpperCAmelCase )
# ffn -> feed_forward
__a = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , _UpperCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
__a = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
__a = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
__a = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
__a = '''rwkv.''' + name
__a = weight
return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Convert an RWKV checkpoint from the Hub into HF `transformers` format.

    Steps: build/save a tokenizer, infer the model size and save an
    `RwkvConfig`, download the checkpoint, rename its keys via
    `convert_state_dict`, shard and save the weights (with an index file when
    sharded), re-save each shard on CPU, and optionally push to the Hub.

    Fix: every local (tokenizer, vocab_size, config, state_dict, shards,
    index, ...) and all parameters had been collapsed into mangled
    placeholders, so each assignment shadowed the previous one; the
    restoration follows the names read later in the body and the keyword
    arguments used by the `argparse` call site below.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.')
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.')
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        # Re-save each shard with CPU-resident, freshly cloned tensors.
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point for the RWKV -> HF conversion.  Fix: the parser and the
    # parsed namespace were bound to mangled placeholders, so every
    # `parser.add_argument(...)` / `args.*` access raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
    )
    parser.add_argument(
        '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
    )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
    )
    parser.add_argument(
        '--tokenizer_file',
        default=None,
        type=str,
        help='Path to the tokenizer file to use (if not provided, only the model is converted).',
    )
    parser.add_argument(
        '--size',
        default=None,
        type=str,
        help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Push to the Hub the converted model.',
    )
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='Name of the pushed model on the Hub, including the username / organization.',
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 708 |
# Capacity matrix of the demo flow network: test_graph[u][v] is the capacity
# of the directed edge u -> v (0 = no edge).  The `__main__` driver below uses
# vertex 0 as source and vertex 5 as sink.  Fix: the constant had been bound
# to a mangled placeholder while the driver reads it as `test_graph`.
test_graph: list[list[int]] = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Breadth-first search over edges with positive residual capacity.

    Fills ``parent`` with each visited vertex's BFS predecessor and returns
    True iff sink ``t`` is reachable from source ``s``.

    Fix: the visited list, queue, visit marks and parent updates had all been
    collapsed into one mangled placeholder, so nothing was actually marked or
    recorded; the restoration follows the names read inside the loop.
    """
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def mincut(graph, source, sink):
    """Return the min-cut edges of a flow network (Ford-Fulkerson / BFS).

    ``graph`` is a square capacity matrix, mutated in place into its residual
    form.  After saturating all augmenting paths, every edge whose residual
    capacity dropped to 0 but had positive original capacity belongs to the
    minimum cut; those ``(i, j)`` pairs are returned.

    Fix: all locals (parent, res, temp, path_flow, ...) had been collapsed
    into a single mangled placeholder and the BFS helper was unreachable; the
    helper is now a private nested function so the block is self-contained.
    """

    def _bfs(capacity, s, t, parent):
        # BFS over positive-residual edges; records predecessors in `parent`
        # and reports whether the sink is reachable.
        visited = [False] * len(capacity)
        queue = [s]
        visited[s] = True
        while queue:
            u = queue.pop(0)
            for ind in range(len(capacity[u])):
                if not visited[ind] and capacity[u][ind] > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return visited[t]

    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while _bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
    # Demo entry point: print the min-cut edges of the module-level capacity
    # matrix with vertex 0 as source and vertex 5 as sink.
    # NOTE(review): `mincut` and `test_graph` are the intended names of the
    # definitions above, which an automated rename mangled -- confirm the
    # names line up before running.
    print(mincut(test_graph, source=0, sink=5))
| 60 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __snake_case ( _UpperCAmelCase ):
__a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract MNIST images from gzip file `f` into a 4D uint8 array
    [index, rows, cols, 1].

    Raises ValueError if the file's magic number is not 2051.  Fix: the
    decorator's first argument and all locals had been mangled (the
    `deprecated` decorator takes a removal date or None as its first
    argument), and the dtype `numpy.uinta` does not exist (uint8).
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot row vectors.

    Fix: the row-offset vector and the output matrix had been collapsed into
    one mangled placeholder, and the final assignment lost its
    ``labels_one_hot.flat[...]`` target entirely.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set a single 1 per row at each label's column.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels from gzip file `f` into a 1D uint8 array
    (or a one-hot matrix when `one_hot` is set).

    Raises ValueError if the file's magic number is not 2049.  Fix: decorator
    argument, locals and the uint8 dtype had all been mangled.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST split with epoch-aware mini-batch iteration.

    Fix: the class had been renamed `_A` while the loader below constructs
    `_DataSet(...)`; method names, `self._*` attribute assignments and all
    locals had been collapsed into mangled placeholders (the `images`/`labels`
    property names are read back inside `next_batch`).
    """

    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.')
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet.

        `dtype` can be uint8 (leave pixels in [0, 255]) or float32 (rescale
        into [0.0, 1.0]).  `seed` controls the shuffling order.
        """
        seeda, seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        """The images of this split."""
        return self._images

    @property
    def labels(self):
        """The labels of this split."""
        return self._labels

    @property
    def num_examples(self):
        """Number of examples in this split."""
        return self._num_examples

    @property
    def epochs_completed(self):
        """Number of full passes over the data made so far."""
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless the
    file already exists there.  Returns the local file path.

    Fix: decorator argument, the local path and the downloaded size had all
    been mangled (`return filepath` referenced a name never bound).
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')')
def __snake_case(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Load the MNIST train/validation/test splits (historically `read_data_sets`).

    Downloads the four gzip files when missing, extracts them, carves
    `validation_size` examples off the front of the training split, and
    returns a `_Datasets(train, validation, test)` of `_DataSet` objects.

    Fix: every local and parameter had been collapsed into mangled
    placeholders; the restoration follows the names read later in the body
    (`fake_data`, `source_url`, `train_images`, `validation_size`, ...).
    """
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 709 |
from __future__ import annotations
def print_distance(distance, src):
    """Print a table of shortest distances from vertex `src`, one row per vertex.

    Fix: the parameters had been collapsed into one mangled placeholder while
    the body reads `src` and iterates the distance list; the `__main__` driver
    below calls `print_distance(shortest_distance, 0)`.
    """
    print(f'Vertex\tShortest Distance from vertex {src}')
    for i, d in enumerate(distance):
        print(f'{i}\t\t{d}')
def check_negative_cycle(graph, distance, edge_count):
    """Return True iff one more Bellman-Ford relaxation round would still
    improve some distance, i.e. a reachable negative cycle exists.

    `graph` is a list of {'src', 'dst', 'weight'} edge dicts and `distance`
    the distances produced by `vertex_count - 1` relaxation rounds.  Fix:
    parameters and the unpacked (u, v, w) had been mangled into placeholders.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph, vertex_count, edge_count, src):
    """Single-source shortest paths allowing negative edge weights.

    `graph` is a list of {'src', 'dst', 'weight'} edge dicts.  Returns the
    list of shortest distances from `src` (unreachable vertices stay inf);
    raises Exception if a reachable negative cycle exists.

    Fix: the distance list, the relaxation target and the cycle flag had all
    been collapsed into a single mangled placeholder; the cycle check is now a
    private nested helper so the block is self-contained.
    """

    def _has_negative_cycle(edges, distance):
        # A further successful relaxation after vertex_count - 1 rounds
        # proves a negative cycle.
        for j in range(edge_count):
            u, v, w = (edges[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                return True
        return False

    distance = [float('inf')] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if _has_negative_cycle(graph, distance):
        raise Exception('Negative cycle found')

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read a graph as `E` edge dicts {src, dst, weight},
    # then run Bellman-Ford from a user-chosen source.
    # NOTE(review): an automated rename collapsed the targets `V`, `E`,
    # `graph`, `src`/`dest`/`weight`, `graph[i]`, `source` and
    # `shortest_distance` into `__snake_case`, so the names read below
    # (`range(E)`, `bellman_ford(graph, V, E, source)`, ...) are unbound --
    # restore them before running.
    __snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
    __snake_case :Any = int(input('''Enter number of edges: ''').strip())

    __snake_case :list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        __snake_case ,__snake_case ,__snake_case :int = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        __snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}

    __snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
    __snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 60 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _A ( unittest.TestCase ):
    """Unit tests for `AlignProcessor` (BERT tokenizer + EfficientNet image processor).

    NOTE(review): an automated rename collapsed method names to
    `_lowerCamelCase`, assignment targets to `__a` and several arguments to
    `__SCREAMING_SNAKE_CASE`.  The fixture attributes were plainly
    `self.tmpdirname`, `self.vocab_file` and `self.image_processor_file`
    (they are read back below), and each docstring records the method's
    intended role -- restore the names before these tests can run.
    """

    def _lowerCamelCase ( self : Any):
        '''setUp: create a temp dir holding a BERT vocab file and an EfficientNet image-processor JSON config.'''
        __a = tempfile.mkdtemp()

        __a = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        __a = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        __a = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE)
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
            json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : int):
        '''get_tokenizer: slow BertTokenizer loaded from the temp-dir fixture.'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''get_rust_tokenizer: fast BertTokenizerFast loaded from the temp-dir fixture.'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]):
        '''get_image_processor: EfficientNetImageProcessor loaded from the temp-dir fixture.'''
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[Any]):
        '''tearDown: remove the temp directory created in setUp.'''
        shutil.rmtree(self.tmpdirname)

    def _lowerCamelCase ( self : str):
        '''prepare_image_inputs: one random 3x30x400 uint8 image, returned as a PIL image.'''
        __a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]

        __a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs]

        return image_inputs

    def _lowerCamelCase ( self : Union[str, Any]):
        '''test_save_load_pretrained_default: save/reload round-trips preserve tokenizer vocab and image-processor config for slow and fast tokenizers.'''
        __a = self.get_tokenizer()
        __a = self.get_rust_tokenizer()
        __a = self.get_image_processor()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        processor_slow.save_pretrained(self.tmpdirname)
        __a = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE)

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        processor_fast.save_pretrained(self.tmpdirname)
        __a = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE)
        self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE)

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE)
        self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[Any]):
        '''test_save_load_pretrained_additional_features: kwargs passed to from_pretrained override tokenizer/image-processor settings.'''
        __a = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        __a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
        __a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)

        __a = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE)

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Union[str, Any]):
        '''test_image_processor: processing images through the processor matches the bare image processor.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)

        __a = self.prepare_image_inputs()

        __a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''')
        __a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)

    def _lowerCamelCase ( self : Tuple):
        '''test_tokenizer: processing text through the processor matches the bare tokenizer (max_length=64 padding).'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)

        __a = '''lower newer'''

        __a = processor(text=__SCREAMING_SNAKE_CASE)

        __a = tokenizer(__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])

    def _lowerCamelCase ( self : Dict):
        '''test_processor: text+image call yields the four expected keys; calling with no input raises.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)

        __a = '''lower newer'''
        __a = self.prepare_image_inputs()

        __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)

        self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(__SCREAMING_SNAKE_CASE):
            processor()

    def _lowerCamelCase ( self : Optional[Any]):
        '''test_tokenizer_decode: processor.batch_decode forwards to tokenizer.batch_decode.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)

        __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __a = processor.batch_decode(__SCREAMING_SNAKE_CASE)
        __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)

        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Dict):
        '''test_model_input_names: the processor's output keys match processor.model_input_names.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()

        __a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)

        __a = '''lower newer'''
        __a = self.prepare_image_inputs()

        __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)

        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 710 |
import os
import sys
import unittest
# Locate the repo root so that `utils/check_dummies.py` becomes importable.
# Fix: the path and the dummy templates were bound to a mangled placeholder,
# so `sys.path.append(os.path.join(git_repo_path, ...))` raised NameError; the
# comment below shows the PATH_TO_TRANSFORMERS assignment was meant to patch
# the attribute inside `check_dummies`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

# Expected dummy-object templates produced by `create_dummy_object`.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class _A ( unittest.TestCase ):
    """Unit tests for the `check_dummies` repo utility (backend detection and
    dummy-object/file generation).

    NOTE(review): an automated rename collapsed method names to
    `_lowerCamelCase` and assignment targets to `__a` (which each
    `self.assert*` then reads back under its real name, e.g. `backend`,
    `objects`, `dummy_constant`) -- restore the names before running.
    """

    def _lowerCamelCase ( self : List[Any]):
        '''test_find_backend: `find_backend` returns None for plain lines and the backend name(s) for `if not is_xxx_available():` lines.'''
        __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(__SCREAMING_SNAKE_CASE)

        __a = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')

        __a = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')

        __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')

        __a = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')

        __a = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')

    def _lowerCamelCase ( self : Optional[Any]):
        '''test_read_init: `read_init` maps backend names to the objects guarded behind them.'''
        __a = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertModel''' , objects['''tf'''])
        self.assertIn('''FlaxBertModel''' , objects['''flax'''])
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])

    def _lowerCamelCase ( self : Any):
        '''test_create_dummy_object: constants, functions and classes each render to their expected dummy template.'''
        __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')

        __a = create_dummy_object('''function''' , '''\'torch\'''')
        self.assertEqual(
            __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')

        __a = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        __a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[Any]):
        '''test_create_dummy_files: a full dummy file for the torch backend matches the expected autogenerated text.'''
        __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _A ( tf.keras.layers.Layer ):
    """In-graph GPT-2 byte-pair tokenizer layer built on
    `keras_nlp.BytePairTokenizer`, with optional padding to a fixed length.

    NOTE(review): an automated rename left duplicate `__SCREAMING_SNAKE_CASE`
    parameter names in every signature below -- duplicate argument names are a
    SyntaxError in Python, so the originals (`vocab`, `merges`, `max_length`,
    `pad_token_id`, ...) must be restored before use.  Attribute assignments
    were likewise collapsed to `__a` (`self.pad_token_id`, `self.max_length`,
    `self.vocab`, `self.merges`, `self.tf_tokenizer`, all read back in the
    config/call methods).
    """

    def __init__( self : int , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : int = None):
        '''Store vocab/merges/max_length/pad_token_id and build the underlying BytePairTokenizer.'''
        super().__init__()
        __a = pad_token_id
        __a = max_length
        __a = vocab
        __a = merges
        __a = BytePairTokenizer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sequence_length=__SCREAMING_SNAKE_CASE)

    @classmethod
    def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : GPTaTokenizer , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]):
        '''from_tokenizer: build from an existing slow GPT-2 tokenizer by extracting its merge list and vocab.'''
        __a = [''' '''.join(__SCREAMING_SNAKE_CASE) for m in tokenizer.bpe_ranks.keys()]
        __a = tokenizer.get_vocab()
        return cls(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : str):
        '''from_pretrained: load a slow GPT-2 tokenizer from the Hub/local path, then delegate to from_tokenizer.'''
        __a = GPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        return cls.from_tokenizer(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''from_config: rebuild the layer from the dict returned by get_config.'''
        return cls(**__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[str]):
        '''get_config: serialize the constructor arguments (Keras layer protocol).'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int = None):
        '''call: tokenize a string tensor; when pad_token_id and a max length are set, pad/truncate and return input_ids + attention_mask.'''
        __a = self.tf_tokenizer(__SCREAMING_SNAKE_CASE)
        __a = tf.ones_like(__SCREAMING_SNAKE_CASE)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            __a = max_length if max_length is not None else self.max_length

            if max_length is not None:
                __a , __a = pad_model_inputs(
                    __SCREAMING_SNAKE_CASE , max_seq_length=__SCREAMING_SNAKE_CASE , pad_value=self.pad_token_id)

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 711 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module-level logger; read as `logger` by the formatter below (it had been
# bound to a mangled placeholder, making `logger.warning(...)` a NameError).
logger = get_logger()

# Lazily-built global mapping from device string id to
# `jaxlib.xla_extension.Device`; kept global because Device objects are not
# serializable with pickle/dill (see the comments in the formatter below).
DEVICE_MAPPING: Optional[dict] = None
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
    def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any):
        """Formatter that materializes arrow data as `jax.Array`s on a chosen device.

        NOTE(review): an automated rename collapsed the parameters (plainly
        `device` and `jnp_array_kwargs`, read back below) into duplicate
        `__SCREAMING_SNAKE_CASE` names -- duplicate argument names are a
        SyntaxError -- and the attribute assignments into `__a`
        (`self.device`, `self.jnp_array_kwargs`); restore before use.
        """
        super().__init__(features=__SCREAMING_SNAKE_CASE)
        import jax
        from jaxlib.xla_client import Device

        # Device objects must be passed as their string id, never directly.
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            raise ValueError(
                F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` '
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
        __a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            __a = self._map_devices_to_str()
        # Unknown device ids fall back to the first available device.
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
                F'device: {str(jax.devices()[0])}.')
            __a = str(jax.devices()[0])
        __a = jnp_array_kwargs
@staticmethod
def _lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()}
    def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple):
        """Stack a column (list of arrays) into one `jnp` array when every
        element shares the first element's shape and dtype; otherwise return
        the column unchanged.

        NOTE(review): both operands of the outer isinstance and the stacked
        value were collapsed into the same mangled placeholder -- the check is
        presumably `isinstance(column, list)` followed by
        `jnp.stack(column, axis=0)`; confirm and restore.
        """
        import jax
        import jax.numpy as jnp

        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column:
            if all(
                isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0)
        return column
    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str):
        """Convert one scalar/ndarray value into a `jnp` array on `self.device`.

        Strings/bytes pass through unchanged; character arrays become Python
        lists; integer/float dtypes get an explicit target dtype; PIL images
        are first converted to numpy.

        NOTE(review): the mangled `jnp.intaa` appears in *both* branches of
        the x64 check -- the original presumably selected int64 when
        `jax_enable_x64` is set and int32 otherwise (likewise `floataa` for
        float32); restore the distinct dtypes before use.
        """
        import jax
        import jax.numpy as jnp

        if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))):
            return value
        elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
            return value.tolist()

        __a = {}
        if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                __a = {'''dtype''': jnp.intaa}
            else:
                __a = {'''dtype''': jnp.intaa}
        elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            __a = {'''dtype''': jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
                __a = np.asarray(__SCREAMING_SNAKE_CASE)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            __a = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs})
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str):
    '''Recursively tensorize a nested structure (lists/tuples/arrays).

    Torch tensors are detached to numpy first; object-dtype numpy arrays and
    list/tuple containers recurse element-wise and are then consolidated.
    '''
    import jax

    # support for torch, tf, jax etc.
    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

        if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
            return self._tensorize(data_struct.detach().cpu().numpy()[()])
    # NOTE(review): `data_struct` below is unbound (the parameter was renamed to
    # `__SCREAMING_SNAKE_CASE`) — confirm against upstream datasets.
    if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array):
        __a = data_struct.__array__()
    # support for nested types like struct of list of struct
    if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
        if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
            return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
    elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)):
        return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
    return self._tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict):
    '''Apply the recursive tensorizer over a nested mapping via map_nested.'''
    # NOTE(review): upstream passes map_list=False here; passing the data dict as
    # map_list looks like a rename artifact — confirm.
    return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table):
    '''Extract, decode and tensorize a single row from an Arrow table.'''
    # NOTE(review): the extracted/decoded results are assigned to `__a` but the
    # original table is passed to recursive_tensorize — likely a rename artifact.
    __a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE)
    __a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE)
    return self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table):
    '''Extract, decode, tensorize and consolidate the table's first column.'''
    __a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE)
    # NOTE(review): `pa_table` and the returned `column` are unbound under the
    # current names (everything is assigned to `__a`) — confirm against upstream.
    __a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0])
    __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
    __a = self._consolidate(__SCREAMING_SNAKE_CASE)
    return column
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table):
    '''Extract, decode and tensorize a whole batch, consolidating each column.'''
    __a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE)
    __a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE)
    __a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
    # NOTE(review): `batch` is unbound here (assigned as `__a` above) — confirm.
    for column_name in batch:
        __a = self._consolidate(batch[column_name])
    return batch
| 60 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case :Tuple = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case :Optional[Any] = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__snake_case :int = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__snake_case :int = '''
{0} = None
'''
__snake_case :Any = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__snake_case :List[str] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def __snake_case ( _UpperCAmelCase ):
    '''Return the backend name encoded in an `is_xxx_available()` line.

    Multiple backends on one line are joined with "_and_"
    (e.g. "torch_and_transformers"); returns None when the line has none.
    '''
    # Fix: test/join the regex matches, not the raw input line itself.
    matches = _re_backend.findall(_UpperCAmelCase )
    if len(matches ) == 0:
        return None
    return "_and_".join(matches )
def __snake_case ( ):
    '''Parse the diffusers __init__.py and collect backend-guarded objects.

    Returns:
        dict: ``{backend_name: [object_name, ...]}`` for every
        `is_xxx_available()` guard found in the init file.
    '''
    # Fix: this zero-argument function read the undefined name `_UpperCAmelCase`
    # and collapsed every loop-state variable into `__a`; restore consistent
    # locals per the upstream diffusers utils/check_dummies.py.
    # NOTE(review): PATH_TO_DIFFUSERS is the module path constant ("src/diffusers")
    # defined at the top of this file — confirm its bound name.
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def __snake_case ( name , backend ):
    '''Render the dummy-stub source for one object guarded by `backend`.

    UPPERCASE names become constants, lowercase names functions, and
    anything else a DummyObject class.
    '''
    # Fix: the original declared two parameters with the same name (a
    # SyntaxError) and passed the wrong arguments to the templates.
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend )
    else:
        return DUMMY_CLASS.format(name , backend )
def __snake_case ( _UpperCAmelCase=None ):
    '''Build the full dummy-module source for every backend.

    Args:
        _UpperCAmelCase (dict, optional): ``{backend: [objects]}``; read from
            the init file when None.

    Returns:
        dict: ``{backend: dummy module source}``.
    '''
    # Fix: the parameter was never used, per-backend results were assigned to a
    # throwaway local, and the returned mapping was never populated.
    backend_specific_objects = _UpperCAmelCase
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(f'"{b}"' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def __snake_case ( _UpperCAmelCase=False ):
    '''Compare generated dummy files with those on disk.

    Args:
        _UpperCAmelCase (bool): when True overwrite stale dummy files in
            place; otherwise raise ValueError on any mismatch.
    '''
    # Fix: the overwrite flag was read under the undefined name `overwrite`
    # and every intermediate mapping was collapsed into `__a`; restore the
    # locals per the upstream diffusers utils/check_dummies.py.
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    # NOTE(review): PATH_TO_DIFFUSERS is the "src/diffusers" path constant defined
    # at the top of this file — confirm its bound name.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , f'dummy_{short_names.get(backend , backend )}_objects.py' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if _UpperCAmelCase:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    f'diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '
                    '''to fix this.''' )
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites stale dummy files in place.
    # NOTE(review): `parser`/`args` are unbound here — the ArgumentParser and the
    # parsed namespace are both assigned to the rebound name `__snake_case`, and
    # `check_dummies` is not the name the function above is bound to; confirm.
    __snake_case :int = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    __snake_case :Any = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 712 |
import argparse
import logging
import pickle
from collections import Counter
# Count per-token occurrences over a binarized dataset; the counts are used to
# smooth the masking probabilities in MLM training (cf. XLM / word2vec).
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
    # NOTE(review): `parser`, `args`, `data`, `counter` and `counts` are unbound
    # below — every assignment targets the rebound name `__snake_case`; confirm
    # the intended variable names against the original script.
    __snake_case :Union[str, Any] = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    __snake_case :List[str] = parser.parse_args()
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, '''rb''') as fp:
        __snake_case :Optional[Any] = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    __snake_case :Dict = Counter()
    # Accumulate the frequency of every token id across the whole dataset.
    for tk_ids in data:
        counter.update(tk_ids)
    # Densify into a vocab-sized list (ids never seen keep count 0).
    __snake_case :Optional[Any] = [0] * args.vocab_size
    for k, v in counter.items():
        __snake_case :Any = v
    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __snake_case ( _UpperCAmelCase ):
    '''Freeze a torch module: disable gradient tracking on all its parameters.'''
    # Fix: the loop iterated the undefined name `module` and assigned a throwaway
    # local instead of setting `requires_grad`, so nothing was ever frozen.
    for param in _UpperCAmelCase.parameters():
        param.requires_grad = False
def __snake_case ( ):
    '''Return the best available torch device identifier.

    Prefers "mps" (with a loud warning) when available and built, otherwise
    "cuda" when available, falling back to "cpu".
    '''
    # Fix: the chosen device was assigned to a throwaway local while the
    # comparisons and the return read the undefined name `device` (NameError).
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def __snake_case ( _UpperCAmelCase ):
    '''Display an image with matplotlib, hiding both axes.'''
    __a = plt.imshow(_UpperCAmelCase )
    # NOTE(review): `fig` is unbound (the imshow handle was assigned to `__a`) and
    # `set_visible` receives the image argument where upstream passes False —
    # both look like automated-rename artifacts; confirm before relying on this.
    fig.axes.get_xaxis().set_visible(_UpperCAmelCase )
    fig.axes.get_yaxis().set_visible(_UpperCAmelCase )
    plt.show()
def __snake_case ( ):
    '''Return the current local wall-clock time formatted as HH:MM:SS.'''
    # Fix: the datetime was assigned to a throwaway local while strftime was
    # called on the undefined name `current_time` (NameError).
    current_time = datetime.now()
    return current_time.strftime('''%H:%M:%S''' )
| 713 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()  # Hub client used below to enumerate diffusers models
__snake_case :str = {}  # expected-logits lookup, keyed by sanitized model id
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# For each Google/CompVis diffusion model on the Hub, load the local checkpoint,
# run one deterministic UNet forward pass, and compare against the expected
# logits recorded above.
# NOTE(review): `api`, `models`, `local_checkpoint`, `model`, `noise`,
# `time_step`, `logits` and `results` are all unbound here — every assignment
# targets the rebound name `__snake_case`; confirm intended variable names.
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(f'Started running {mod.modelId}!!!')
        # CompVis checkpoints store the UNet under a "unet" subfolder.
        if mod.modelId.startswith('''CompVis'''):
            __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the sampled noise (and hence the logits) is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        __snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            __snake_case :Any = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(f'{mod.modelId} has passed successfully!!!')
| 60 | 0 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Run a Stable Diffusion pipeline on CPU with Intel Extension for PyTorch
# (IPEX) optimizations: channels-last memory format plus bfloat16 ipex.optimize
# on each submodule, then one deterministic generation.
# NOTE(review): `parser`, `args`, `model_id`, `pipe`, `device`, `sample`,
# `timestep`, `encoder_hidden_status`, `input_example`, `seed`, `generator`,
# `generate_kwargs`, `prompt` and `image` are all unbound below — every
# assignment targets the rebound name `__snake_case`; confirm intended names.
__snake_case :List[str] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__snake_case :Any = parser.parse_args()
__snake_case :Optional[int] = '''cpu'''
__snake_case :List[str] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__snake_case :Any = '''path-to-your-trained-model'''
__snake_case :Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    __snake_case :Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__snake_case :Tuple = pipe.to(device)
# to channels last
__snake_case :List[str] = pipe.unet.to(memory_format=torch.channels_last)
__snake_case :Tuple = pipe.vae.to(memory_format=torch.channels_last)
__snake_case :Tuple = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    __snake_case :Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__snake_case :Any = torch.randn(2, 4, 64, 64)
__snake_case :Union[str, Any] = torch.rand(1) * 999
__snake_case :str = torch.randn(2, 77, 768)
__snake_case :Optional[Any] = (sample, timestep, encoder_hidden_status)
# Try trace-friendly optimization with a sample input first; fall back to the
# generic path when tracing fails.
try:
    __snake_case :Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    __snake_case :Tuple = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__snake_case :Optional[int] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__snake_case :Tuple = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    __snake_case :Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__snake_case :int = 666
__snake_case :Union[str, Any] = torch.Generator(device).manual_seed(seed)
__snake_case :int = {'''generator''': generator}
if args.steps is not None:
    __snake_case :Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    __snake_case :int = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 714 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCAmelCase ):
    '''Reverse the byte order of a 32-bit binary string (bytes of "0"/"1").

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    '''
    if len(_UpperCAmelCase ) != 32:
        raise ValueError('''Input must be of length 32''' )
    # Fix: the loop read the undefined name `string_aa` instead of the parameter.
    little_endian = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += _UpperCAmelCase[8 * i : 8 * i + 8]
    return little_endian
def __snake_case ( _UpperCAmelCase ):
    '''Format a non-negative 32-bit int as little-endian hex bytes.

    Raises:
        ValueError: if the input is negative.
    '''
    # Fix: the guard read the undefined name `i` instead of the parameter.
    if _UpperCAmelCase < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(_UpperCAmelCase , '''08x''' )[-8:]
    little_endian_hex = b''''''
    # Emit the four hex byte-pairs in reverse (little-endian) order.
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def __snake_case ( _UpperCAmelCase ):
    '''Pad a message (bytes) into MD5 bit-string format.

    Each byte becomes its 8-bit binary form; a "1" bit is appended, "0" bits
    pad the length to 448 mod 512, and the original bit length is appended as
    a little-endian 64-bit value (RFC 1321 padding).
    '''
    # Fix: the loop formatted the whole message (TypeError) instead of each
    # byte, and the recorded length was the byte count of the input rather
    # than the bit count of the accumulated string.
    bit_string = b''''''
    for char in _UpperCAmelCase:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    # NOTE(review): `to_little_endian` is the 32-bit byte-swap helper defined
    # above (bound here under the rebound name `__snake_case`) — confirm.
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def __snake_case ( _UpperCAmelCase ):
    '''Yield lists of sixteen 32-bit little-endian words per 512-bit block.

    Raises:
        ValueError: if the bit string's length is not a multiple of 512.
    '''
    if len(_UpperCAmelCase ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    # Fix: the body sliced the undefined names `bit_string`/`block` instead of
    # the parameter and the local it had just assigned.
    for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
        block = _UpperCAmelCase[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            # NOTE(review): `to_little_endian` is the byte-swap helper defined
            # above (bound under the rebound name `__snake_case`) — confirm.
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def __snake_case ( _UpperCAmelCase ):
    '''Return the bitwise complement of a non-negative 32-bit integer.

    Raises:
        ValueError: if the input is negative.
    '''
    if _UpperCAmelCase < 0:
        raise ValueError('''Input must be non-negative''' )
    # Fix: the loop read the undefined names `i_str`/`new_str`; flip each bit
    # of the 32-character binary representation and reparse.
    i_str = format(_UpperCAmelCase , '''032b''' )
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def __snake_case ( _UpperCAmelCase , _lowerCAmelCase ):
    '''Add two integers modulo 2**32 (MD5's wrapping 32-bit addition).'''
    # Fix: both parameters shared one name (SyntaxError) and the body read the
    # undefined names `a`/`b`.
    return (_UpperCAmelCase + _lowerCAmelCase) % 2**32
def __snake_case ( _UpperCAmelCase , _lowerCAmelCase ):
    '''Rotate a 32-bit integer left by `_lowerCAmelCase` bits.

    Raises:
        ValueError: if the value or the shift is negative.
    '''
    # Fix: both parameters shared one name (SyntaxError) and the body read the
    # undefined names `i`/`shift`. The shifted halves are disjoint for 32-bit
    # inputs, so XOR combines them like OR.
    if _UpperCAmelCase < 0:
        raise ValueError('''Input must be non-negative''' )
    if _lowerCAmelCase < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((_UpperCAmelCase << _lowerCAmelCase) ^ (_UpperCAmelCase >> (32 - _lowerCAmelCase))) % 2**32
def __snake_case ( _UpperCAmelCase ):
    '''Compute the MD5 digest of a bytes message as 32 lowercase hex bytes.

    Implements RFC 1321: preprocess/pad, then for each 512-bit block run 64
    rounds over the four running state words and fold the result back in.
    '''
    # Fix: the original collapsed the four state words and all round-local
    # variables into a single `__a`, leaving `a`/`b`/`c`/`d`, `f`, `g` and the
    # state words unbound; restored per the reference implementation.
    # NOTE(review): `preprocess`, `get_block_words`, `not_aa`, `sum_aa`,
    # `left_rotate_aa` and `reformat_hex` are the helpers defined above (each
    # bound under the rebound name `__snake_case`) — confirm their bindings.
    bit_string = preprocess(_UpperCAmelCase )
    # Per-round additive constants: floor(2**32 * |sin(i+1)|).
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X67_452_301
    ba = 0Xef_cda_b89
    ca = 0X98_bad_cfe
    da = 0X10_325_476
    # Per-round left-rotation amounts (RFC 1321, four groups of sixteen).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 60 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _A ( __UpperCAmelCase ):
    '''Composite FLAVA processor wrapping an image processor and a BERT tokenizer.

    NOTE(review): the `__call__`/`__init__` parameter lists repeat the name
    `__SCREAMING_SNAKE_CASE` (a SyntaxError as written) and several bodies read
    names (`kwargs`, `image_processor`, `tokenizer`, `text`, `images`,
    `encoding`) that are never bound — these look like artifacts of an
    automated rename; confirm against the upstream transformers FlavaProcessor.
    '''

    # Attributes/classes that `ProcessorMixin` wires together.
    UpperCamelCase__ : Union[str, Any] = ['''image_processor''', '''tokenizer''']
    UpperCamelCase__ : List[str] = '''FlavaImageProcessor'''
    UpperCamelCase__ : str = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : int):
        '''Build the processor; accepts the deprecated `feature_extractor` kwarg
        as a fallback for `image_processor` and validates both components.'''
        __a = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __SCREAMING_SNAKE_CASE , )
            __a = kwargs.pop('''feature_extractor''')
        __a = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = self.image_processor

    def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[ImageInput] = None , __SCREAMING_SNAKE_CASE : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
        '''Tokenize text and/or preprocess images, merging both encodings when
        both inputs are given; raises ValueError when neither is provided.'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            __a = self.tokenizer(
                text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        if images is not None:
            __a = self.image_processor(
                __SCREAMING_SNAKE_CASE , return_image_mask=__SCREAMING_SNAKE_CASE , return_codebook_pixels=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        if text is not None and images is not None:
            encoding.update(__SCREAMING_SNAKE_CASE)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE) , tensor_type=__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Dict):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int]):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    @property
    def _lowerCamelCase ( self : Tuple):
        '''Union of tokenizer and image-processor model input names, de-duplicated.'''
        __a = self.tokenizer.model_input_names
        __a = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def _lowerCamelCase ( self : List[str]):
        '''Deprecated alias for `image_processor_class`.'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
        return self.image_processor_class

    @property
    def _lowerCamelCase ( self : int):
        '''Deprecated alias for `image_processor`.'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
        return self.image_processor
| 715 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# Tiny wav2vec2 checkpoints exercised by the DeepSpeed integration tests.
__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
# NOTE(review): every constant above rebinds the same name `__snake_case`, so
# `ZEROa` below is unbound — presumably the zero2/zero3 stage tags; confirm.
__snake_case :str = [ZEROa, ZEROa]
def __snake_case ( func , param_num , param ):
    '''Build a sub-test name containing every parameterized argument.

    Args:
        func: the test function being parameterized.
        param_num: index of the parameter set (unused, required by the API).
        param: the `parameterized` param object whose args are joined.
    '''
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    # Fix: the three parameters shared one name (SyntaxError), the join
    # stringified the wrong value instead of each arg, and the joined name was
    # assigned to a throwaway local while the f-string read `param_based_name`.
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
# NOTE(review): `stages` and `models` are unbound here — the values above were
# all assigned to the rebound name `__snake_case`; confirm intended names.
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
    '''DeepSpeed (ZeRO-2 / ZeRO-3) integration tests for tiny wav2vec2 models.

    NOTE(review): the parameter lists below repeat the name
    `__SCREAMING_SNAKE_CASE` (a SyntaxError as written) and method bodies read
    names (`models`, `fpaa`, `model_name`, `output_dir`, `eval_steps`,
    `distributed`) that are never bound — artifacts of an automated rename;
    confirm against the upstream transformers wav2vec2 deepspeed test.
    '''

    # fp16 + single-GPU run for each (stage, model) combination.
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
        '''Run one fp16 single-GPU training/eval cycle.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''Run one fp16 multi-GPU training/eval cycle.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
        '''Run one fp32 single-GPU training/eval cycle.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''Run one fp32 multi-GPU training/eval cycle.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )

    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
        '''Post-run validation hook; intentionally a no-op here.'''
        pass

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''Resolve the model key, run the trainer once, then run do_checks.'''
        __a = models[model]
        __a = self.run_trainer(
            stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
        self.do_checks(__SCREAMING_SNAKE_CASE)
        return output_dir

    def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''Launch examples/research_projects/wav2vec2/run_asr.py via deepspeed
        in a subprocess with the stage-specific ds_config, returning the temp
        output directory.'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
        __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fpaa:
            args.extend(['''--fp16'''])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        __a = self.get_launcher(__SCREAMING_SNAKE_CASE)
        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
        return output_dir

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
        '''Build the deepspeed launcher argv (1 node; 2 GPUs when distributed).'''
        __a = min(2 , get_gpu_count()) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 60 | 0 |
from __future__ import annotations
from collections import deque
class _A :
    """Aho-Corasick automaton for simultaneous multi-keyword search in a string.

    Each trie node is a dict in ``adlist`` with keys:
      value       -- the character labelling the edge into this node,
      next_states -- indices of child nodes,
      fail_state  -- index followed on a mismatch (suffix link),
      output      -- keywords that end at (or via fail links through) this node.
    """

    def __init__(self, keywords: list[str]):
        # NOTE(review): method and parameter names restored from the internal call
        # sites (self.add_keyword / self.set_fail_transitions / self.find_next_state);
        # the obfuscated original also duplicated parameter names, a SyntaxError.
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str):
        """Return the child of ``current_state`` labelled ``char``, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert ``keyword`` into the trie, recording it at its final node."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute suffix (fail) links and merged outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk fail links until a node with a matching child (or the root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match at the fail target is also a match here.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        """Return {keyword: [start indices]} for every keyword occurrence in ``string``."""
        result: dict = {}  # keywords -> list of occurrence start positions
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 716 |
def __snake_case(input_str, use_pascal=False):
    """Convert a snake_case string to camelCase (default) or PascalCase.

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word as well (PascalCase).

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is not a bool.
    """
    # NOTE(review): the obfuscated original reused a single placeholder for both
    # parameters (a SyntaxError) and then ran isinstance(x, x); names restored from
    # the body (input_str.split / use_pascal) and the error messages.
    if not isinstance(input_str, str):
        msg = f'Expected string as input, found {type(input_str)}'
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal)}'
        raise ValueError(msg)
    words = input_str.split('_')
    # camelCase keeps the first word lower-cased; PascalCase capitalizes it too.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return ''.join([initial_word, *capitalized_words])
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 60 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _A(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for IFInpaintingPipeline (DeepFloyd IF inpainting)."""

    # NOTE(review): the base classes, attribute names and test-method names below were
    # obfuscated placeholders; they are restored to the PipelineTesterMixin /
    # IFPipelineTesterMixin API the bodies call (e.g. `super().test_save_load_floataa`
    # fixes that method's own name). Confirm ordering/names against upstream diffusers.
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        """Tiny model components shared with the IF pipeline test mixin."""
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy call kwargs (image, mask, prompt, generator) for `device`."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators; seed globally instead.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Half precision needs a looser tolerance than the default.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 717 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _A :
    """A ``major.minor.patch`` dataset version, comparable and hashable by its
    integer (major, minor, patch) tuple.
    """

    # NOTE(review): the obfuscated original collapsed all five fields onto one name
    # and gave every method the same name; fields and protocol methods are restored
    # from the bodies (self.version_str / self.major / self.tuple / _validate_operand
    # are all read below). `from_dict` / `_to_yaml_string` names are presumed — confirm.
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse the canonical string into the three integer components.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        """The (major, minor, patch) triple used for ordering and hashing."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        # Accept either a raw version string or another version instance.
        if isinstance(other, str):
            return _A(other)
        elif isinstance(other, _A):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a version from a dict, ignoring keys that are not dataclass fields."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        # The serialized YAML form is just the canonical version string.
        return self.version_str
def _str_to_version_tuple(version_str):
    """Parse an ``x.y.z`` version string into a tuple of three ints.

    Raises:
        ValueError: if ``version_str`` does not match the ``x.y.z`` format.
    """
    # NOTE(review): name restored from the call in the version dataclass above.
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    # Fix: the obfuscated original applied int() to the whole argument inside the
    # comprehension (so every call raised); convert each matched group instead.
    return tuple(int(v) for v in [res.group('''major'''), res.group('''minor'''), res.group('''patch''')])
def _version_tuple_to_str(version_tuple):
    """Join a (major, minor, patch) tuple back into an ``x.y.z`` string."""
    # Fix: the obfuscated original stringified the whole tuple for every element;
    # stringify each component. Name restored from the __hash__ call above.
    return ".".join(str(v) for v in version_tuple)
| 60 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): the lazy-import registry below was obfuscated (each assignment went
# to a throwaway name); restored to the standard transformers `_import_structure` /
# `sys.modules[__name__] = _LazyModule(...)` pattern that the `_LazyModule` call at
# the bottom of this file requires — confirm against upstream transformers.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    # Older jiwer has no char-level reducers; emulate them with a custom transform.
    class SentencesToListOfCharacters(tr.AbstractTransform):
        """jiwer transform: flatten sentences into a flat list of characters,
        inserting `sentence_delimiter` between consecutive sentences.

        NOTE(review): class name restored from the tr.Compose(...) use below; the
        method names follow jiwer's AbstractTransform API (process_string /
        process_list), which self.process_string in the body confirms.
        """

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            # A single string becomes its list of characters.
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences, but not after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
__snake_case :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
# NOTE(review): constant names restored from their uses in the Metric class below
# (add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION), citation=_CITATION).
_CITATION = '''\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''

_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _A(datasets.Metric):
    """Character Error Rate (CER) metric backed by jiwer."""

    # NOTE(review): method names restored to the datasets.Metric API (_info/_compute);
    # the obfuscated originals shared one name and duplicated parameter names (a
    # SyntaxError). Argument order to jiwer (truth first) follows the upstream
    # cer.py — confirm.
    def _info(self):
        """Declare metric metadata and the expected string inputs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/jitsi/jiwer/'''],
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return the corpus-level character error rate (errors / reference chars)."""
        if concatenate_texts:
            # jiwer reports the key "wer", but on the char-level transform it is the CER.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 60 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own flags plus the wrapped training script and its args.

    Returns the argparse namespace with num_cores, training_script and
    training_script_args.
    """
    # NOTE(review): function name restored from the call in main(); the obfuscated
    # placeholders for `type`/`nargs` are restored to int/str/REMAINDER per the
    # standard xla_spawn launcher (REMAINDER is imported at the top of this file).
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ), )
    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Import the training script as a module and fan it out over the TPU cores."""
    # NOTE(review): name restored from the __main__ guard below; the obfuscated
    # original also dropped the `sys.argv` assignment that its own "Patch sys.argv"
    # comment describes, and passed the wrong name to import_module.
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the wrapped script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# NOTE(review): the lazy-import registry below was obfuscated (each assignment went
# to a throwaway name); restored to the standard transformers `_import_structure` /
# `sys.modules[__name__] = _LazyModule(...)` pattern that the `_LazyModule` call at
# the bottom of this file requires. Submodule keys are grounded by the TYPE_CHECKING
# imports (feature_extraction_vit, image_processing_vit, modeling_vit, ...).
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _A ( unittest.TestCase ):
    # NOTE(review): obfuscation artifacts throughout this class —
    #  * every method is named `_lowerCamelCase`, so only the LAST definition survives
    #    at class-creation time; the earlier ones are silently shadowed;
    #  * many bodies read `__SCREAMING_SNAKE_CASE`, which is unbound in their scope
    #    (presumably `tmpdirname`, a processor/config object, or a type before
    #    obfuscation);
    #  * successive results are all assigned to the same `__a`.
    # Confirm each body against the upstream AutoImageProcessor tests before running.
    def _lowerCamelCase ( self : List[Any]):
        '''Presumably a setUp-style hook: initializes a counter to zero.'''
        __a = 0
    def _lowerCamelCase ( self : List[str]):
        '''Loads a CLIP image processor from the Hub by model id and checks its type.'''
        __a = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
        self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Dict):
        '''Loads an image processor from a local dir containing preprocessor_config.json + config.json.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Presumably the Path argument should be `tmpdirname` — confirm.
            __a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
            __a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
            json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Optional[Any]):
        '''Same as above, but with the legacy feature_extractor_type key in the config.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
            __a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
            json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any]):
        '''Loads an image processor from config.json alone (no image_processor_type key).'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            __a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
            __a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
            json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE).to_dict()
            config_dict.pop('''image_processor_type''')
            # NOTE(review): `config_dict` above is unbound here (it was presumably the
            # preceding `__a`) — obfuscation damage.
            __a = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE)
            # save in new folder
            model_config.save_pretrained(__SCREAMING_SNAKE_CASE)
            config.save_pretrained(__SCREAMING_SNAKE_CASE)
            # NOTE(review): `model_config` / `config` are likewise unbound — confirm.
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
            # make sure private variable is not incorrectly saved
            __a = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Optional[Any]):
        '''Loads an image processor from preprocessor_config.json alone.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Any):
        '''A non-existent model id raises with a helpful message.'''
        with self.assertRaisesRegex(
            __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier'''):
            __a = AutoImageProcessor.from_pretrained('''clip-base''')
    def _lowerCamelCase ( self : int):
        '''A non-existent revision raises with a helpful message.'''
        with self.assertRaisesRegex(
            __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''')
    def _lowerCamelCase ( self : int):
        '''A repo without a preprocessor config raises with a helpful message.'''
        with self.assertRaisesRegex(
            __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            __a = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')
    def _lowerCamelCase ( self : Tuple):
        '''Remote-code processors require trust_remote_code=True and reload correctly.'''
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            __a = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__SCREAMING_SNAKE_CASE):
            __a = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
        __a = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
        # NOTE(review): `image_processor` is unbound (presumably the preceding `__a`).
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(__SCREAMING_SNAKE_CASE)
            __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE)
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''')
    def _lowerCamelCase ( self : Any):
        '''Registering a custom config + image processor pair makes the auto-API resolve it.'''
        try:
            AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE)
            AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__SCREAMING_SNAKE_CASE):
                AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            with tempfile.TemporaryDirectory() as tmpdirname:
                __a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
                __a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
                json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
                __a = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(__SCREAMING_SNAKE_CASE)
                __a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
                self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        finally:
            # Always unregister so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _lowerCamelCase ( self : Optional[int]):
        '''trust_remote_code resolution: local registration wins unless remote is requested.'''
        class _A ( __UpperCAmelCase ):
            # Marker attribute checked below via `image_processor.is_local`.
            UpperCamelCase__ : Dict = True
        try:
            AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE)
            AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            # If remote code is not set, the default is to use local
            __a = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            __a = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            __a = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
            self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local'''))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 720 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = GPTSwaTokenizer
UpperCamelCase__ : Dict = False
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = False
    def _lowerCamelCase ( self : List[Any]):
        '''Fixture setup: builds a tokenizer from the SentencePiece fixture and saves it.'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): `__SCREAMING_SNAKE_CASE` below is unbound (presumably the
        # module-level fixture path), and `tokenizer` on the last line should
        # presumably be the `__a` assigned just above — obfuscation damage.
        __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
        tokenizer.save_pretrained(self.tmpdirname)
    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
        '''Returns an (input_text, output_text) pair for round-trip checks.'''
        # NOTE(review): both literals are assigned to the same `__a`, while the return
        # reads `input_text`/`output_text`, which are unbound — obfuscation damage.
        __a = '''This is a test'''
        __a = '''This is a test'''
        return input_text, output_text
    def _lowerCamelCase ( self : Dict):
        '''Checks token<->id conversion for the "<s>" token (expected id 1).'''
        # NOTE(review): the converter arguments below were obfuscated; presumably the
        # two `__a` values ("<s>" and 1) — confirm upstream.
        __a = '''<s>'''
        __a = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any]):
        '''Sanity-checks the vocab: first/last keys and a total size of 2000.'''
        # NOTE(review): `vocab_keys` is unbound (presumably the `__a` just above).
        __a = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<unk>''')
        self.assertEqual(vocab_keys[1] , '''<s>''')
        self.assertEqual(vocab_keys[-1] , '''j''')
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)
    def _lowerCamelCase ( self : Dict):
        '''The tokenizer reports a vocab size of 2000.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
    def _lowerCamelCase ( self : List[str]):
        '''Full tokenize / ids / tokens round trip, incl. byte-fallback for 9 and é.'''
        # NOTE(review): `tokenizer` and the assertion operands were obfuscated
        # (presumably the preceding `__a` each time) — confirm upstream.
        __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)

        __a = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])

        __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        # fmt: off
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
        # fmt: on
        __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
        # fmt: off
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
        # fmt: on
def _lowerCamelCase ( self : Any):
    """encode_fast must match tokenize+convert_tokens_to_ids; decode_fast must invert it.

    NOTE(review): same mangling as above — ``__SCREAMING_SNAKE_CASE`` is
    undefined in this scope and ``tokenizer`` is read without being bound;
    the loop arguments were clearly distinct names originally.
    """
    __a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
    __a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
    __a = [
        [465, 287, 265, 631, 842],
        [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
    ]
    # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
    for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
        self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
    # Test that decode_fast returns the input text
    for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
        self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Any):
    """Integration check against the hosted ``AI-Sweden/gpt-sw3-126m`` checkpoint.

    Encodes a mixed Swedish/English/code sample set and compares against a
    recorded expected encoding (ids, token_type_ids, attention_mask).
    NOTE(review): ``__SCREAMING_SNAKE_CASE`` arguments below are mangled —
    they were the two locals assigned to ``__a`` (texts and expected
    encoding); confirm before running.
    """
    __a = [
        '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
        '''Hey there, how are you doing this fine day?''',
        '''This is a text with a trailing spaces followed by a dot .''',
        '''Häj sväjs lillebrör! =)''',
        '''Det är inget fel på Mr. Cool''',
    ]
    # fmt: off
    __a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
| 60 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __snake_case ( _UpperCAmelCase ):
    # With the mock fixture active, both the temporary "mock" protocol and the
    # built-in "bz2" protocol must be registered with fsspec.
    # NOTE(review): the parameter is unused in the body — presumably a pytest
    # fixture (e.g. ``mock_fsspec``) that registers "mock" as a side effect;
    # confirm before renaming or removing it.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def __snake_case ( ):
    # Without the mock fixture, only permanently-registered protocols (e.g.
    # "bz2") remain; the temporary "mock" protocol must be absent.
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def __snake_case ( ):
    """extract_path_from_uri strips the scheme from remote URIs and leaves
    local paths untouched.

    Restored locals: the dumped body read an undefined ``dataset_path``.
    """
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def __snake_case ( mockfs ):
    """is_remote_filesystem is True for the mock remote filesystem fixture and
    False for the local ``file`` filesystem.

    NOTE(review): the parameter name was mangled; ``mockfs`` reconstructed —
    pytest resolves fixtures by name, so confirm the fixture is called that.
    Bug fix: the second check previously re-tested the fixture argument
    instead of the freshly created local filesystem.
    """
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def __snake_case ( compression_fs_class , gz_file , bza_file , lza_file , xz_file , zstd_file , text_file ):
    """Each compression filesystem exposes the archive's single member (the
    original file name minus the compression suffix) and decompresses it to
    the reference text.

    The dump collapsed all seven parameters onto one name (a SyntaxError);
    names reconstructed from the body's dict values — the fixture names must
    match the project's conftest, so confirm.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # optional compression backends may be missing; skip with the reason
        # recorded on the corresponding requirement marker
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def __snake_case ( protocol , zip_jsonl_path , jsonl_gz_path ):
    """A chained fsspec URL (``protocol://member::archive``) exposes the
    archived member file and nothing else.

    Parameter names reconstructed from the body (the dump's duplicated
    parameter names were a SyntaxError); fixture names to be confirmed.
    """
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def __snake_case ( hf_api , hf_token , hf_private_dataset_repo_txt_data , text_file ):
    """HfFileSystem lists and reads the files of a (private) dataset repo.

    Parameter names reconstructed (the dump's duplicated names were a
    SyntaxError); fixture names to be confirmed against the conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)

    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def __snake_case ( ):
    """Re-registering an already-set fsspec protocol warns on module reload.

    Restored the register/warn arguments lost in the dump: per fsspec's
    ``register_implementation`` API, clobbering an existing protocol with
    ``clobber=True`` and reloading emits a UserWarning.
    """
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 721 |
from __future__ import annotations

# Collected solutions; ``solve`` appends one entry per completed placement.
# (The dump had renamed this and the three functions away from the names used
# at their call sites — ``solution``/``is_safe``/``solve``/``printboard`` —
# and collapsed parameter names; restored for a runnable module.)
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked.

    Only the row, the column and the two *upper* diagonals are checked,
    because ``solve`` fills the board one row at a time from the top.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking search: place one queen per row, recording and printing
    every complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print the board: 'Q' for a queen, '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 60 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI Hub test credentials / endpoints. Constant names restored from their
# references inside the fixtures below (CI_HUB_USER, CI_HUB_USER_TOKEN,
# CI_HUB_ENDPOINT, ...); the dump had collapsed them all to one name.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the "{filename}" placeholder had been clobbered to
# "(unknown)" by a redaction pass; restored to match huggingface_hub's
# HUGGINGFACE_CO_URL_TEMPLATE format fields (repo_id/revision/filename).
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


# Fixture names below were reconstructed from the cross-references that
# survived the dump (e.g. the pass-through fixtures return
# ``hf_private_dataset_repo_txt_data_``); confirm against the test modules.
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub file downloads at the CI hub endpoint."""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point datasets' hub configuration at the CI hub endpoint."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Store the HfFolder token under a CI-specific path."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Save the CI user token for the duration of a test."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    """Yield the CI user token, restoring any previously saved token after."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    """Create a private dataset repo holding one text file; delete it after."""
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private dataset repo holding one zipped text archive."""
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    """Create a private dataset repo holding one zipped image archive."""
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 700 |
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from *key*, keeping first occurrences.

    Spaces are always kept (even when repeated).

    >>> remove_duplicates("Goodbye!!")
    'Godbye'
    """
    key_no_dups = ""
    for ch in key:
        # operator precedence: spaces are always appended; letters only when
        # not already present
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the substitution map of a keyword cipher from *key*.

    >>> create_cipher_map("Goodbye!!")["A"]
    'G'
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher *message* (upper-cased); unmapped characters pass through.

    >>> encipher("Hello World!!", create_cipher_map("Goodbye!!"))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Invert *cipher_map* and decipher *message*.

    >>> decipher("CYJJM VMQJB!!", create_cipher_map("Goodbye!!"))
    'HELLO WORLD!!'
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive driver: prompt for a message, a keyword and a direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 60 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort, in place; returns the (sorted) list.

    ``length`` is the size of the still-unsorted prefix; 0 means the whole
    list. (The dump's duplicated parameter names were a SyntaxError; names
    restored from the body's own references and the recursive call.)

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # bubble the larger element one slot to the right
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # a pass with no swap means sorted; otherwise sort the shorter prefix
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 701 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Without sentencepiece there is no slow tokenizer to fall back to; the
    # fast tokenizer class below references this name, so it must be bound.
    # (The dump had assigned None to a mangled name, leaving
    # ``BarthezTokenizer`` undefined.)
    BarthezTokenizer = None
# Module constants. Names restored from their use sites in the tokenizer
# class below (`logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`); the dump had collapsed them all
# onto one mangled name.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# sentencepiece word-boundary marker; not referenced in this view — name
# presumed from the transformers convention, confirm against the original.
SPIECE_UNDERLINE = "▁"
class _A ( PreTrainedTokenizerFast ):
    """Fast BARThez tokenizer (sentencepiece-BPE backed, via `tokenizers`).

    The dump's ``__init__`` had every parameter collapsed onto one name (a
    SyntaxError) and the hook methods renamed; parameter and method names
    are restored per the ``PreTrainedTokenizerFast`` contract
    (``build_inputs_with_special_tokens`` etc.), which is what the base
    class dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Make the mask token behave like a normal word (include preceding
        # space). NOTE(review): lstrip/rstrip values were lost in the dump;
        # lstrip=True, rstrip=False is the transformers convention — confirm.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BARThez does not use token types: return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into *save_directory*; requires the
        original vocab file (fast-only tokenizers cannot reconstruct it)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 60 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """Static SageMaker launch configuration used by the tests below.

    The dump had collapsed every field onto one mangled name (each assignment
    shadowing the previous) and lost the class name; ``MockLaunchConfig`` is
    restored from the references in the test class, field names from
    accelerate's SageMakerConfig schema — confirm against upstream.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # args that parse cleanly into native types
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # mixes flag-style and value-style booleans, which must be rejected
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class _A ( unittest.TestCase ):
    def _lowerCamelCase ( self : List[str]):
        """_convert_nargs_to_dict coerces CLI strings to native Python types;
        mixing flag-style and value-style booleans raises ValueError.

        The isinstance targets and the raises argument were mangled to an
        undefined name in the dump; restored to the native types the
        converter produces (str/bool/int/float) — confirm against upstream.
        """
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 702 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Named container for the three MNIST splits; instantiated as ``_Datasets``
# by the loader at the bottom of this module (names restored from those
# call sites — the dump had bound both constants to one mangled name).
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Read one big-endian unsigned 32-bit integer from *bytestream*.

    MNIST/IDX headers are big-endian uint32. Fixes from the dump: the
    nonexistent ``numpy.uintaa`` dtype, the dtype argument (the stream was
    being passed as ``dtype``), and the def name, which the callers below
    spell ``_readaa`` ("aa" is mangling of "32").
    """
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract MNIST images from gzip file *f* into a uint8 array of shape
    [index, rows, cols, 1].

    Raises ValueError if the IDX magic number is not 2051. (Local names and
    the def name were restored from the header-read order of the IDX image
    format and the call sites below.)
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class-index labels to a one-hot matrix of shape
    [num_labels, num_classes].

    NOTE(review): the dump reduced the scatter step to a bare ``__a = 1``;
    the flat-index assignment is restored per the classic TensorFlow
    mnist.py implementation — confirm against the original.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels from gzip file *f* into a 1-D uint8 vector, or a
    one-hot matrix when *one_hot* is set.

    Raises ValueError if the IDX magic number is not 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST split with mini-batch iteration (legacy TF helper).

    Renamed from the mangled ``_A``: the loader below instantiates it as
    ``_DataSet``. ``__init__``'s duplicated parameter names made the dump a
    SyntaxError; parameter and method names restored from the classic
    tensorflow/models mnist.py — confirm against upstream.
    """

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be `uint8` to leave the input as `[0, 255]`, or `float32`
        to rescale into `[0.0, 1.0]`. `seed` provides deterministic shuffling.
        """
        seeda, seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples, reshuffling at epoch ends."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download *source_url* into *work_directory*/*filename* unless the file
    already exists; return the local file path.

    (Def name restored from the call sites below; local/parameter names from
    the surviving ``return filepath``.)
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def __snake_case(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Returns a ``_Datasets(train, validation, test)`` namedtuple. The dump's
    duplicated parameter names were a SyntaxError; names restored from the
    body's own keyword uses (``validation_size``, ``source_url``, ...).
    """
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 60 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): the three module constants below all share the mangled name
# ``__snake_case`` — each assignment shadows the previous one, so only the
# last value (50002) survives. In the original these were distinct names:
# the sentencepiece sample-vocab path and two special-token ids (presumably
# a language-code id and its neighbor). Restore distinct names before use.
__snake_case :Any = get_tests_dir('''fixtures/test_sentencepiece.model''')

if is_torch_available():
    # only needed by the torch-dependent integration tests
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

__snake_case :Tuple = 5_0003
__snake_case :int = 5_0002
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = PLBartTokenizer
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : List[Any] = False
    def _lowerCamelCase ( self : str):
        """setUp: build a base-variant PLBart tokenizer from the sentencepiece
        fixture and save it into ``self.tmpdirname`` for the shared tests.

        NOTE(review): ``__SCREAMING_SNAKE_CASE`` is undefined in this scope
        (presumably the sample-vocab path / ``keep_accents=True`` before
        mangling) and ``tokenizer`` is read without being bound.
        """
        super().setUp()

        # We have a SentencePiece fixture for testing
        __a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''base''' , keep_accents=__SCREAMING_SNAKE_CASE)

        tokenizer.save_pretrained(self.tmpdirname)
    def _lowerCamelCase ( self : Any):
        """Full tokenizer check for the "base" language-code variant: tokens,
        ids (with fairseq offset), id->token round-trip, the trailing
        special-token block of the vocab, and decode of language-code text.

        NOTE(review): identifiers here are machine-mangled — every local is
        assigned to ``__a`` while uses go through undefined
        ``__SCREAMING_SNAKE_CASE``/``tokenizer`` names; verify against the
        original test before running.
        """
        __a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''base''' , keep_accents=__SCREAMING_SNAKE_CASE)

        __a = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        # accented sentence: the sample model maps unseen characters to <unk>
        __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

        # the last four vocab entries are the base-variant special tokens
        __a = tokenizer.vocab_size
        __a = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) for x in range(end - 4 , __SCREAMING_SNAKE_CASE)]

        self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''])

        __a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
        __a = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
        self.assertEqual(
            tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : int):
    '''Round-trip tokenization test for ``PLBartTokenizer`` built with
    ``language_codes='multi'``: tokenize two sentences, map tokens to ids and
    back, check the seven multi-language code tokens at the top of the vocab,
    and verify encode/decode round-trips a raw string.

    NOTE(review): identifiers in this block look machine-mangled — every
    result is assigned to ``__a`` while later statements read ``tokenizer``,
    ``end`` and ``__SCREAMING_SNAKE_CASE``, none of which is bound in this
    scope.  Confirm against the upstream test before relying on it to run.
    '''
    # Tokenizer from the test SentencePiece model; accents must be preserved.
    __a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''multi''' , keep_accents=__SCREAMING_SNAKE_CASE)
    __a = tokenizer.tokenize('''This is a test''')
    self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
    # Ids are the raw SentencePiece ids shifted by ``fairseq_offset``.
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
    __a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    # keep_accents: the accented piece ``é`` must survive tokenization.
    self.assertListEqual(
        __SCREAMING_SNAKE_CASE , [
            SPIECE_UNDERLINE + '''I''',
            SPIECE_UNDERLINE + '''was''',
            SPIECE_UNDERLINE + '''b''',
            '''or''',
            '''n''',
            SPIECE_UNDERLINE + '''in''',
            SPIECE_UNDERLINE + '''''',
            '''9''',
            '''2''',
            '''0''',
            '''0''',
            '''0''',
            ''',''',
            SPIECE_UNDERLINE + '''and''',
            SPIECE_UNDERLINE + '''this''',
            SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''',
            '''al''',
            '''s''',
            '''é''',
            '''.''',
        ] , )
    __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
    self.assertListEqual(
        __SCREAMING_SNAKE_CASE , [
            value + tokenizer.fairseq_offset
            for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
        ] , )
    # Converting ids back: out-of-vocab pieces (``9``, ``é``) become ``<unk>``.
    __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
    self.assertListEqual(
        __SCREAMING_SNAKE_CASE , [
            SPIECE_UNDERLINE + '''I''',
            SPIECE_UNDERLINE + '''was''',
            SPIECE_UNDERLINE + '''b''',
            '''or''',
            '''n''',
            SPIECE_UNDERLINE + '''in''',
            SPIECE_UNDERLINE + '''''',
            '''<unk>''',
            '''2''',
            '''0''',
            '''0''',
            '''0''',
            ''',''',
            SPIECE_UNDERLINE + '''and''',
            SPIECE_UNDERLINE + '''this''',
            SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''',
            '''al''',
            '''s''',
            '''<unk>''',
            '''.''',
        ] , )
    # With the ``multi`` code set the last seven vocab entries are the seven
    # language-code tokens.
    __a = tokenizer.vocab_size
    __a = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) for x in range(end - 7 , __SCREAMING_SNAKE_CASE)]
    self.assertListEqual(
        __SCREAMING_SNAKE_CASE , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''])
    __a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
    __a = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
    # Encode then decode (no special tokens, no cleanup) must reproduce the
    # input text exactly.
    self.assertEqual(
        tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    '''Integration tests for the pretrained ``uclanlp/plbart-python-en_XX``
    tokenizer: special-token ids, batch encoding/decoding, truncation,
    save/reload, and seq2seq input preparation.

    NOTE(review): this class is machine-mangled — all four class attributes
    share the name ``UpperCamelCase__`` (only the last binding survives) and
    every method is named ``_lowerCamelCase`` (so unittest cannot discover
    them as tests); method bodies also read names such as ``generated_ids``,
    ``src_text``, ``ids``, ``new_tok``, ``batch`` and ``targets`` that were
    never bound.  Confirm against the upstream test module.
    '''

    # Checkpoint under test.
    UpperCamelCase__ : Union[str, Any] = '''uclanlp/plbart-python-en_XX'''
    # Source (python) texts.
    UpperCamelCase__ : Optional[int] = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    # Target (English) texts.
    UpperCamelCase__ : Any = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    # Expected ids for the first source text, ending in EOS + language code.
    UpperCamelCase__ : Any = [
        134,
        5_452,
        33_460,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        988,
        20,
        33_456,
        19,
        33_456,
        771,
        39,
        4_258,
        889,
        3_318,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        2_471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def _lowerCamelCase ( cls : str):
        '''Class-level setup: load the pretrained tokenizer once for all tests.'''
        __a = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''')
        __a = 1
        return cls

    def _lowerCamelCase ( self : int):
        '''The three ``base`` language codes sit right after the 50k vocab.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003)

    def _lowerCamelCase ( self : Tuple):
        '''Batch-encoding the first source text yields the expected ids.'''
        __a = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any):
        '''Decoding with skip_special_tokens drops the leading language code
        and the EOS token.'''
        self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids)
        __a = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        __a = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)
        __a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE)
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Dict):
        '''Truncation to max_length keeps EOS and language code at the end.'''
        __a = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
        self.assertIsInstance(src_text[0] , __SCREAMING_SNAKE_CASE)
        __a = 10
        __a = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE).input_ids[0]
        self.assertEqual(ids[-2] , 2)
        self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE)
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Tuple):
        '''``<mask>`` and ``__java__`` map to their reserved ids.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__''']) , [50_004, 50_001])

    def _lowerCamelCase ( self : Optional[Any]):
        '''Saving and reloading preserves the fairseq special-token mapping.'''
        __a = tempfile.mkdtemp()
        __a = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
        __a = PLBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE)

    @require_torch
    def _lowerCamelCase ( self : int):
        '''Seq2seq batches end source ids with [EOS, src-code] and labels with
        [EOS, tgt-code]; decoder inputs are the labels shifted right.'''
        __a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        __a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0] , __SCREAMING_SNAKE_CASE)
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])

    @require_torch
    def _lowerCamelCase ( self : int):
        '''Padding/truncation to the expected source length yields (2, 26)
        tensors, and prefix/suffix special tokens are reset afterwards.'''
        __a = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
        __a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
        self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        self.assertEqual((2, 26) , batch.input_ids.shape)
        self.assertEqual((2, 26) , batch.attention_mask.shape)
        __a = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE)
        self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])

    def _lowerCamelCase ( self : str):
        '''Source and target sides can be truncated to different lengths.'''
        __a = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors='''pt''')
        __a = self.tokenizer(
            text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=10 , return_tensors='''pt''')
        __a = targets['''input_ids''']
        __a = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10)

    @require_torch
    def _lowerCamelCase ( self : List[Any]):
        '''Translation inputs carry the source code at the end and force the
        target code as BOS.'''
        __a = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''')
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE) , {
                # A, test, EOS, en_XX
                '''input_ids''': [[150, 242, 2, 50_003]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # java
                '''forced_bos_token_id''': 50_001,
            } , )
| 703 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Fixes the machine-mangled original, which discarded every intermediate
    result by assigning it to ``__a`` and then returned the unbound names
    ``image`` and ``map`` (the latter silently resolving to the builtin).
    Relies on the module-level ``load_dataset`` and ``Image`` imports.
    """
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
    image = Image.open(dataset[0]['''file'''])
    seg_map = Image.open(dataset[1]['''file'''])
    return image, seg_map
def __snake_case ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures
    for the batched segmentation tests.

    Fixes the machine-mangled original, whose four ``Image.open`` results were
    all discarded into ``__a`` before returning the unbound names
    ``imagea``/``mapa``.  Fixture layout assumed to alternate
    image/map/image/map — TODO confirm against the upstream test module.
    """
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
    image_a = Image.open(ds[0]['''file'''])
    map_a = Image.open(ds[1]['''file'''])
    image_b = Image.open(ds[2]['''file'''])
    map_b = Image.open(ds[3]['''file'''])
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
    '''Tests for ``BeitImageProcessor``: config round-trips and pixel/label
    encoding for PIL, numpy and torch inputs, with and without segmentation
    maps.

    NOTE(review): this class is machine-mangled — every method is named
    ``_lowerCamelCase`` (later definitions shadow earlier ones, so unittest
    cannot discover them individually) and bodies read names such as
    ``image_processor``, ``image_processing``, ``image_inputs``,
    ``encoded_images``, ``encoding`` and ``maps`` that are never bound
    (results were assigned to ``__a``).  Confirm against the upstream test.
    '''

    # Processor class under test; ``None`` when vision deps are missing.
    UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None

    def _lowerCamelCase ( self : int):
        '''Set up the shared configuration tester.'''
        __a = BeitImageProcessingTester(self)

    @property
    def _lowerCamelCase ( self : int):
        '''Kwargs dict for instantiating the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowerCamelCase ( self : Optional[Any]):
        '''The processor exposes all expected configuration attributes.'''
        __a = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))

    def _lowerCamelCase ( self : str):
        '''``from_dict`` honours both defaults and keyword overrides
        (including the legacy ``reduce_labels`` spelling).'''
        __a = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
        self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
        __a = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
        self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Dict):
        '''Intentionally empty override (placeholder in the original).'''
        pass

    def _lowerCamelCase ( self : Tuple):
        '''PIL inputs are resized/cropped to (num_channels, crop_h, crop_w).'''
        __a = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
        # Test not batched input
        __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def _lowerCamelCase ( self : int):
        '''Same as the PIL test, but with numpy-array inputs.'''
        __a = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
        # Test not batched input
        __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def _lowerCamelCase ( self : List[Any]):
        '''Same as the PIL test, but with torch-tensor inputs.'''
        __a = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
        # Test not batched input
        __a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def _lowerCamelCase ( self : Dict):
        '''Segmentation maps are encoded alongside pixels: labels come back as
        long tensors in [0, 255] with batch/height/width shapes matching the
        crop size, for tensor, single-PIL and batched-PIL inputs.'''
        __a = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
        __a = []
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        __a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test batched
        __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test not batched input (PIL images)
        __a , __a = prepare_semantic_single_inputs()
        __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test batched input (PIL images)
        __a , __a = prepare_semantic_batch_inputs()
        __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)

    def _lowerCamelCase ( self : Dict):
        '''``do_reduce_labels`` remaps ADE20k labels: without it values stay
        within [0, 150]; with it the background becomes 255.'''
        __a = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        __a , __a = prepare_semantic_single_inputs()
        __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 150)
        __a = True
        __a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 60 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __snake_case ( _UpperCAmelCase ):
if "model" in orig_key:
__a = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
__a = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
__a = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
__a = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
__a = orig_key.split('''.''' )[0].split('''_''' )[-1]
__a = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
__a = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
__a = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
__a = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
__a = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
__a = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
__a = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
__a = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
__a = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
__a = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
__a = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
__a = '''yoso.''' + orig_key
return orig_key
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    # NOTE(review): machine-mangled and not runnable as written — both
    # parameters share one name (a SyntaxError in Python) and the body reads
    # ``orig_state_dict`` and ``val``, which are never bound; every result is
    # discarded into ``__a``.  Apparent intent (verify against the upstream
    # conversion script): take (max_position_embeddings, orig_state_dict),
    # rename each key, skip pooler/sentence-classification weights, tie the
    # MLM decoder bias, and precompute the position-id buffer.
    for key in orig_state_dict.copy().keys():
        __a = orig_state_dict.pop(_UpperCAmelCase )
        # Pooler / sequence-classification heads are dropped, not converted.
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            __a = val
    __a = orig_state_dict['''cls.predictions.decoder.bias''']
    # Position ids start at 2 — presumably to skip pad/reserved ids; confirm.
    __a = torch.arange(_UpperCAmelCase ).expand((1, -1) ) + 2
    return orig_state_dict
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # NOTE(review): machine-mangled and not runnable as written — all three
    # parameters share one name (a SyntaxError) and later lines read
    # ``config``, ``model`` and ``pytorch_dump_path`` while every result is
    # bound to ``__a``.  Apparent flow for
    # (checkpoint_path, config_file, dump_path) — verify against upstream:
    # load the raw state dict, build YosoForMaskedLM from the JSON config,
    # convert the keys, load them, and save the HF model.
    __a = torch.load(_UpperCAmelCase , map_location='''cpu''' )['''model_state_dict''']
    __a = YosoConfig.from_json_file(_UpperCAmelCase )
    __a = YosoForMaskedLM(_UpperCAmelCase )
    __a = convert_checkpoint_helper(config.max_position_embeddings , _UpperCAmelCase )
    # ``load_state_dict`` returns the missing/unexpected-keys report.
    print(model.load_state_dict(_UpperCAmelCase ) )
    model.eval()
    model.save_pretrained(_UpperCAmelCase )
    print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
    # CLI entry point: convert an original YOSO checkpoint into a
    # HuggingFace-format model directory.
    # NOTE(review): mangled names — the parser/args are bound to
    # ``__snake_case`` while the calls below read ``parser``/``args``, and
    # ``convert_yoso_checkpoint`` is never defined in this file (all
    # functions above are named ``__snake_case``).
    __snake_case :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The json file for YOSO model config.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __snake_case :Optional[int] = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 704 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __UpperCAmelCase ):
    '''Tests for ``datasets.Dataset.from_list``: column inference, parity with
    ``from_dict``, missing columns, type inference from later records, and the
    empty-list case.

    NOTE(review): machine-mangled — every method is named ``_lowerCamelCase``
    (later definitions shadow earlier ones) and bodies read unbound names
    such as ``dset``, ``example_records`` and ``dset_from_dict`` (results
    were assigned to ``__a``).  Confirm against the upstream test module.
    '''

    def _lowerCamelCase ( self : int):
        '''Fixture: four records sharing the same two columns.'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _lowerCamelCase ( self : Tuple):
        '''Fixture: the same data in column-oriented form.'''
        __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any):
        '''``from_list`` preserves column names and per-row content.'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(__SCREAMING_SNAKE_CASE):
            self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])

    def _lowerCamelCase ( self : Optional[Any]):
        '''``from_list`` and ``from_dict`` infer the same dataset info.'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info , dset_from_dict.info)

    def _lowerCamelCase ( self : int): # checks what happens with missing columns
        '''Columns come from the first record; later extras are dropped and
        missing values become None.'''
        __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertDictEqual(dset[0] , {'''col_1''': 1})
        self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
    def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
        '''An empty list in record 0 gets its element type from record 1.'''
        __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))

    def _lowerCamelCase ( self : List[Any]):
        '''An empty input list yields an empty dataset with no columns.'''
        __a = Dataset.from_list([])
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
        self.assertListEqual(dset.column_names , [])
| 60 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __UpperCAmelCase ):
    '''Tests for ``datasets.Dataset.from_list`` (byte-identical duplicate of
    the class above — likely a copy/paste or data-generation artifact).

    NOTE(review): machine-mangled — every method is named ``_lowerCamelCase``
    (later definitions shadow earlier ones) and bodies read unbound names
    such as ``dset``, ``example_records`` and ``dset_from_dict`` (results
    were assigned to ``__a``).  Confirm against the upstream test module.
    '''

    def _lowerCamelCase ( self : int):
        '''Fixture: four records sharing the same two columns.'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _lowerCamelCase ( self : Tuple):
        '''Fixture: the same data in column-oriented form.'''
        __a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any):
        '''``from_list`` preserves column names and per-row content.'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(__SCREAMING_SNAKE_CASE):
            self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])

    def _lowerCamelCase ( self : Optional[Any]):
        '''``from_list`` and ``from_dict`` infer the same dataset info.'''
        __a = self._create_example_records()
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        __a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info , dset_from_dict.info)

    def _lowerCamelCase ( self : int): # checks what happens with missing columns
        '''Columns come from the first record; missing values become None.'''
        __a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertDictEqual(dset[0] , {'''col_1''': 1})
        self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
    def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
        '''An empty list in record 0 gets its element type from record 1.'''
        __a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        __a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))

    def _lowerCamelCase ( self : List[Any]):
        '''An empty input list yields an empty dataset with no columns.'''
        __a = Dataset.from_list([])
        self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
        self.assertListEqual(dset.column_names , [])
| 705 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Return the (HF name, original name) rename pair for the cls token of stage `idx`.

    Only stage 2 of CvT has a cls token in the original checkpoint, hence the
    fixed ``stage2.cls_token`` source key.
    """
    # Fix: the list was bound to an obfuscated name (`__a`) but appended via `token`.
    token = []
    token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token'''))
    return token
def final():
    """Return the (HF name, original name) rename pairs for the final norm and classifier head."""
    # Fix: the list was bound to an obfuscated name (`__a`) but appended via `head`.
    head = []
    head.append(('''layernorm.weight''', '''norm.weight'''))
    head.append(('''layernorm.bias''', '''norm.bias'''))
    head.append(('''classifier.weight''', '''head.weight'''))
    head.append(('''classifier.bias''', '''head.bias'''))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint to the Hugging Face format.

    Args:
        cvt_model: model name, e.g. ``"cvt-13"``/``"cvt-21"``/``"cvt-w24"``; the depth
            configuration is inferred from characters [4:6] of the name.
        image_size: input image size written into the image processor config.
        cvt_file_name: path to the original ``.pth`` state dict.
        pytorch_dump_folder: output directory for model + image processor.
    """
    # Fix: every local in this function had been collapsed into one obfuscated
    # name (`__a`), destroying the data flow; names reconstructed from use sites.
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='''dataset''')), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('''cpu'''))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Copy each original tensor into the renamed HF key.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fix: `parser`/`args` were bound to obfuscated names, so the references below failed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 60 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__snake_case :List[str] = list[list[float | int]]
def solve(matrix, vector):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Args:
        matrix: square coefficient matrix (list of rows).
        vector: column vector as a list of one-element rows.
    Returns:
        Solution as a column vector (list of one-element rows), each entry
        rounded to 10 decimal places.
    """
    # Fix: all locals were collapsed into `__a`, clobbering the augmented
    # matrix and the row/col cursors; reconstructed from use sites.
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Pivoting: pick the row with the largest absolute value in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points):
    """Return the unique polynomial (as a callable) through points (1, y1), (2, y2), ...

    Builds a Vandermonde system, solves it with :func:`solve`, and returns an
    integer-valued evaluation function of the fitted polynomial.
    """
    # Fix: locals were collapsed into `__a`; the call to the sibling solver
    # referenced `solve`, which the obfuscation had renamed away.
    size = len(y_points)
    coeff_matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            # x coordinate of point i is (i + 1); powers are descending.
            coeff_matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(coeff_matrix, vector)
    def interpolated_func(var: int) -> int:
        # Evaluate the polynomial with coefficients rounded to integers.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))
    return interpolated_func
def question_function(variable: int) -> int:
    """Generating polynomial from Project Euler 101: 1 - n + n^2 - ... + n^10."""
    # Fix: name restored; it is referenced as the default of `solution`.
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func=question_function, order: int = 10) -> int:
    """Project Euler 101: sum of the first incorrect terms (FITs) of the
    optimum polynomials fitted to prefixes of the sequence ``func(1..order)``.
    """
    # Fix: locals collapsed into `__a` and calls to `func`/`interpolate`
    # referenced names the obfuscation had removed; reconstructed.
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Advance until the fitted polynomial first disagrees with func.
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    print(f'{solution() = }')
| 706 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Prepare a PIL image for the pipeline: resize to a multiple of 32,
    convert to a NCHW float tensor scaled to [-1, 1].
    """
    # Fix: locals collapsed into `__a`; also repairs the garbled numeric
    # literals (`np.floataa`, `2_55.0`).
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _A(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    Fixes: the base class was the undefined name ``__UpperCAmelCase`` (should be
    ``DiffusionPipeline``), and the obfuscation gave every parameter the same
    name (a SyntaxError) while clobbering all locals; names reconstructed.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        """Register the model components on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run super-resolution on `image` and return the upscaled image(s)."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 60 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
# Fix: the alias was bound to an obfuscated name while annotations below use `EdgeT`.
EdgeT = tuple[int, int]
class Graph:
    """Undirected weighted graph with edges keyed by the (min, max) vertex pair.

    Fixes: class renamed back to ``Graph`` (it is instantiated as ``Graph`` below),
    duplicate obfuscated parameter names (SyntaxError) repaired, and methods
    restored to the names the call sites use (``add_edge``, ``prims_algorithm``).
    """

    def __init__(self, vertices: set, edges):
        """Store vertices and edges, normalising each edge key to (min, max)."""
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: tuple, weight: int):
        """Add an edge, creating its endpoints if needed."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        subgraph = Graph({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than any edge weight.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already in the tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achievable by reducing the network in
    `filename` (adjacency matrix CSV, '-' = no edge) to its minimum spanning tree.
    """
    # Fix: locals collapsed into `__a`; `os.path.dirname` was called on a
    # nonexistent parameter instead of __file__.
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges = {}
    with open(network_file) as f:
        data = f.read().strip().split('''\n''')
    adjacency_matrix = [line.split(''',''') for line in data]
    # Only the lower triangle is needed; the graph is undirected.
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
    print(f'{solution() = }')
| 707 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Fix: the TypeVars were bound to obfuscated names while the classes below use KT/VT.
KT = TypeVar('''KT''')
VT = TypeVar('''VT''')
class Node(Generic[KT, VT]):
    """Skip-list node: a key, a value, and per-level forward pointers.

    Fixes: class renamed back to ``Node`` (instantiated as ``Node`` by SkipList),
    duplicate obfuscated parameter names repaired, and the level property
    restored to the name ``level`` used throughout.
    """

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i.
        self.forward = []

    def __repr__(self) -> str:
        return F'Node({self.key}: {self.value})'

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic skip list mapping keys to values.

    Fixes: class renamed back to ``SkipList`` (the name every caller uses),
    duplicate obfuscated parameter names repaired, and all locals/methods
    restored to the names the call sites use (insert/delete/find/head/level).
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII diagram of the list, one row per node."""
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size, '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size, '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward
        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order (level-0 traversal)."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level in [1, max_level] with geometric distribution p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node-with-key or None, per-level predecessors of key)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove `key` from the list if present."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` with `value`, overwriting the value if key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under `key`, or None if absent."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    """Inserting four distinct keys stores all four values."""
    # Fix: locals were collapsed into `__a`; names reconstructed from use sites.
    skip_list = SkipList()
    skip_list.insert('''Key1''', 3)
    skip_list.insert('''Key2''', 12)
    skip_list.insert('''Key3''', 41)
    skip_list.insert('''Key4''', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """Re-inserting an existing key overwrites its value instead of duplicating."""
    skip_list = SkipList()
    skip_list.insert('''Key1''', 10)
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''Key5''', 7)
    skip_list.insert('''Key7''', 10)
    skip_list.insert('''Key10''', 5)
    skip_list.insert('''Key7''', 7)
    skip_list.insert('''Key5''', 5)
    skip_list.insert('''Key10''', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """find() on an empty list yields None."""
    skip_list = SkipList()
    assert skip_list.find('''Some key''') is None
def test_search():
    """find() returns the latest value per key and None for absent keys."""
    skip_list = SkipList()
    skip_list.insert('''Key2''', 20)
    assert skip_list.find('''Key2''') == 20
    skip_list.insert('''Some Key''', 10)
    skip_list.insert('''Key2''', 8)
    skip_list.insert('''V''', 13)
    assert skip_list.find('''Y''') is None
    assert skip_list.find('''Key2''') == 8
    assert skip_list.find('''Some Key''') == 10
    assert skip_list.find('''V''') == 13
def test_deleting_item_from_empty_list_do_nothing():
    """delete() on an empty list is a no-op."""
    skip_list = SkipList()
    skip_list.delete('''Some key''')
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """Deleted keys are no longer reachable through find()."""
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 14)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''V''')
    skip_list.delete('''Key2''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''Key2''') is None
def test_delete_removes_only_given_key():
    """Deleting one key at a time leaves all other keys intact."""
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 14)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''V''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') == 14
    assert skip_list.find('''Key1''') == 12
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''X''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') == 12
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''Key1''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') is None
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''Key2''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') is None
    assert skip_list.find('''Key2''') is None
def test_delete_doesnt_leave_dead_nodes():
    """After a delete, no forward pointer still reaches the removed node."""
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 142)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''X''')
    def traverse_keys(node):
        # Walk every forward pointer, not just level 0.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    """Iteration yields keys in ascending order through inserts and deletes."""
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    """Run the whole suite repeatedly to exercise the random level choices."""
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main():
    """Small demo: build a list, delete a key, print the diagram."""
    # Fix: name restored — the __main__ guard below calls `main()`.
    skip_list = SkipList()
    skip_list.insert(2, '''2''')
    skip_list.insert(4, '''4''')
    skip_list.insert(6, '''4''')
    skip_list.insert(4, '''5''')
    skip_list.insert(8, '''4''')
    skip_list.insert(9, '''4''')
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__snake_case :Optional[Any] = logging.get_logger(__name__)
class _A(BaseImageProcessor):
    """MobileViT-style image processor: resize, center-crop, rescale, RGB→BGR flip.

    Fixes: base class was the undefined name ``__UpperCAmelCase`` (should be
    ``BaseImageProcessor``), every method had duplicate obfuscated parameter
    names (SyntaxError), and all locals were collapsed into ``__a``;
    reconstructed from use sites.
    """

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        """Store preprocessing defaults (size defaults to shortest_edge=224,
        crop_size to 256x256)."""
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize so the shortest edge equals size['shortest_edge'], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop to size['height'] x size['width']."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ):
        """Flip RGB ↔ BGR channel order."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch and return a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image semantic segmentation maps,
        optionally resized to `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 708 |
# Capacity matrix of the example flow network (row u, col v = capacity u -> v).
# Fix: the constant was bound to an obfuscated name while the __main__ guard
# below references `test_graph`.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """BFS over residual capacities; fills `parent` and reports whether t is reachable from s."""
    # Fix: duplicate obfuscated parameter names (SyntaxError) and collapsed
    # locals repaired; reconstructed from use sites.
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Only follow edges with remaining capacity.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Ford–Fulkerson max-flow, then return the saturated edges of the min cut.

    Note: `graph` is mutated in place (residual capacities); the original
    capacities are kept in a copy for the final comparison.
    """
    # Fix: duplicate obfuscated parameter names and collapsed locals repaired.
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges that had capacity but are now saturated belong to the min cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 60 | 0 |
import os
def solution():
    """Project Euler 11: greatest product of four adjacent numbers in the
    20x20 grid stored next to this script as grid.txt (any direction).
    """
    # Fix: `os.path.dirname` was called on a nonexistent parameter (should be
    # __file__) and all locals had been collapsed into `__a`.
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    print(solution())
| 709 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print the shortest distance from `src` to every vertex."""
    # Fix: duplicate obfuscated parameter names (SyntaxError) repaired.
    print(f'Vertex\tShortest Distance from vertex {src}')
    for i, d in enumerate(distance):
        print(f'{i}\t\t{d}')
def check_negative_cycle(graph, distance, edge_count):
    """Return True if any edge can still relax the converged distances,
    i.e. the graph contains a negative-weight cycle reachable from the source.
    """
    # Fix: duplicate obfuscated parameter names and collapsed locals repaired.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph, vertex_count, edge_count, src):
    """Bellman-Ford single-source shortest paths.

    Args:
        graph: list of edge dicts with keys 'src', 'dst', 'weight'.
        vertex_count / edge_count: sizes of the graph.
        src: source vertex index.
    Returns:
        List of shortest distances from `src` (float('inf') if unreachable).
    Raises:
        Exception: if a negative-weight cycle is detected.
    """
    # Fix: duplicate obfuscated parameter names and collapsed locals repaired.
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0
    # Relax all edges |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Fix: V/E/graph/src/dest/weight/source were all bound to obfuscated names,
    # so the references below failed.
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 60 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__snake_case = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to an ONNX file at ``output_path``.

    Fixes: the original def repeated the parameter name eight times (a
    SyntaxError) and forwarded that single name to every keyword argument.
    Parameter names are restored from the keyword arguments used at the call
    site in ``convert_models`` below.

    Args:
        model: torch module to export.
        model_args: example inputs passed through to ``torch.onnx.export``.
        output_path: target ``.onnx`` path (parent directories are created).
        ordered_input_names: ONNX graph input names, in order.
        output_names: ONNX graph output names.
        dynamic_axes: axes that may vary at inference time.
        opset: ONNX operator-set version.
        use_external_data_format: store weights outside the model file
            (only meaningful on torch < 1.11).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert the VAE decoder of a `diffusers` checkpoint to ONNX.

    Fixes: the original def repeated the parameter name (a SyntaxError) and the
    dtype expression selected the same dtype in both branches; the dtype now
    actually depends on ``fp16``, matching the CUDA guard below. Parameter names
    are grounded by the body's own uses (``model_path + '/vae'``) and the
    positional call at the bottom of the file.

    Args:
        model_path: local directory or Hub id of the `diffusers` checkpoint.
        output_path: directory where ``vae_decoder/model.onnx`` is written.
        opset: ONNX operator-set version.
        fp16: export in float16 (requires CUDA).
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI driver: parse arguments and run the conversion.
    __snake_case = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_path''',
        type=str,
        required=True,
        help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
    )
    parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--opset''',
        default=14,
        type=int,
        help='''The version of the ONNX operator set to use.''',
    )
    parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    __snake_case = parser.parse_args()
    print(args.output_path)
    # NOTE(review): `parser` and `args` are never bound under these names (the
    # assignments above target `__snake_case`), and argparse stores `--fp16` as
    # `fp16`, not `fpaa` — this block looks mechanically renamed; confirm intent.
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
    print('''SD: Done: ONNX''')
| 710 |
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _A ( unittest.TestCase ):
    """Tests for the `check_dummies` utility (backend detection, dummy generation).

    NOTE(review): all four methods share the name `_lowerCamelCase`, so only the
    last definition survives and none carry the `test_` prefix unittest collects;
    the bodies also assert on `__SCREAMING_SNAKE_CASE`, `objects` and
    `dummy_files`, which are never bound here. The file appears mechanically
    renamed — confirm against the upstream `test_check_dummies.py`.
    """

    def _lowerCamelCase ( self : List[Any]):
        """`find_backend` should extract the backend name from `is_xxx_available()` guard lines."""
        __a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(__SCREAMING_SNAKE_CASE)
        __a = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')
        __a = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')
        __a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')
        __a = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')
        __a = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')

    def _lowerCamelCase ( self : Optional[Any]):
        """`read_init` should report backend-specific objects keyed by backend name."""
        __a = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
        self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertModel''' , objects['''tf'''])
        self.assertIn('''FlaxBertModel''' , objects['''flax'''])
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])

    def _lowerCamelCase ( self : Any):
        """`create_dummy_object` should emit constant / function / class stubs."""
        __a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
        __a = create_dummy_object('''function''' , '''\'torch\'''')
        self.assertEqual(
            __SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')
        __a = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
        __a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Optional[Any]):
        """`create_dummy_files` should emit one complete dummy module per backend."""
        __a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        __a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
# NOTE(review): the decorator argument / base class `__UpperCAmelCase` looks
# mechanically renamed (upstream DeeBERT uses RobertaConfig docstrings and
# `BertPreTrainedModel`/`DeeBertModel`); confirm before use.
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
    """DeeBERT-style RoBERTa encoder: swaps RoBERTa embeddings into a DeeBERT body."""

    UpperCamelCase__ : Dict = RobertaConfig  # config class used by `from_pretrained`
    UpperCamelCase__ : Optional[Any] = '''roberta'''  # base-model prefix for weight loading

    def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str):
        """Build RoBERTa embeddings and (re-)initialize the weights.

        NOTE(review): the bare `__a = RobertaEmbeddings(...)` discards the
        embeddings (upstream assigns to `self.embeddings`); looks mechanically
        renamed — confirm.
        """
        super().__init__(__SCREAMING_SNAKE_CASE)
        __a = RobertaEmbeddings(__SCREAMING_SNAKE_CASE)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
    """DeeRoBERTa sequence classifier with per-layer "highway" early exits.

    NOTE(review): the base class `__UpperCAmelCase` and the bare `__a = ...`
    assignments (upstream they bind `self.num_labels`, `self.roberta`,
    `self.dropout`, `self.classifier`, `outputs`, `logits`, ...) appear
    mechanically renamed, and the forward signature repeats the parameter name
    `__SCREAMING_SNAKE_CASE`, which is invalid Python. Confirm against the
    upstream `modeling_highway_roberta.py` before relying on this class.
    """

    UpperCamelCase__ : Dict = RobertaConfig  # config class used by `from_pretrained`
    UpperCamelCase__ : Tuple = '''roberta'''  # base-model prefix for weight loading

    def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any):
        """Build the DeeRoBERTa backbone plus dropout and a linear classification head."""
        super().__init__(__SCREAMING_SNAKE_CASE)
        __a = config.num_labels
        __a = config.num_hidden_layers
        __a = DeeRobertaModel(__SCREAMING_SNAKE_CASE)
        __a = nn.Dropout(config.hidden_dropout_prob)
        __a = nn.Linear(config.hidden_size , self.config.num_labels)

    @add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=-1 , __SCREAMING_SNAKE_CASE : List[Any]=False , ):
        """Run the backbone plus every highway exit; compute losses when labels are given.

        Returns (loss?), logits, (hidden_states), (attentions) and, in eval mode,
        entropy/exit-layer extras; a HighwayException signals an early exit.
        """
        __a = self.num_layers
        try:
            __a = self.roberta(
                __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , )
            __a = outputs[1]
            __a = self.dropout(__SCREAMING_SNAKE_CASE)
            __a = self.classifier(__SCREAMING_SNAKE_CASE)
            __a = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # Early exit: the exception carries the partial outputs and exit layer.
            __a = e.message
            __a = e.exit_layer
            __a = outputs[0]
        if not self.training:
            __a = entropy(__SCREAMING_SNAKE_CASE)
            __a = []
            __a = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __a = MSELoss()
                __a = loss_fct(logits.view(-1) , labels.view(-1))
            else:
                __a = CrossEntropyLoss()
                __a = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            # work with highway exits
            __a = []
            for highway_exit in outputs[-1]:
                __a = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__SCREAMING_SNAKE_CASE)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    __a = MSELoss()
                    __a = loss_fct(highway_logits.view(-1) , labels.view(-1))
                else:
                    __a = CrossEntropyLoss()
                    __a = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
                highway_losses.append(__SCREAMING_SNAKE_CASE)
            if train_highway:
                __a = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                __a = (loss,) + outputs
        if not self.training:
            __a = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __a = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 711 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case :str = get_logger()  # module-level logger for the formatter
__snake_case :Optional[dict] = None  # lazy global cache: device string id -> jaxlib Device
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
    """Formatter that converts Arrow data into `jax.Array` objects on a chosen device.

    Fixes applied to this block:
    - `__init__` repeated the parameter name `__SCREAMING_SNAKE_CASE` (a SyntaxError);
      the signature is restored to `(features=None, device=None, **jnp_array_kwargs)`,
      matching the body's `features=`/`device`/`jnp_array_kwargs` uses.
    - All five methods were named `_lowerCamelCase`, shadowing one another while the
      bodies call `self._map_devices_to_str`, `self._consolidate`, `self._tensorize`
      and `self.recursive_tensorize`; the methods are renamed so those calls resolve.
    - The object-dtype branch of `_recursive_tensorize` recursed on the whole array
      instead of each `substruct` (infinite recursion).
    """

    def __init__( self , features=None , device=None , **jnp_array_kwargs):
        """Args:
            features: forwarded to the parent `TensorFormatter`.
            device: device string identifier (e.g. from `str(jax.devices()[0])`);
                `jaxlib` Device objects are rejected because they cannot be pickled.
            **jnp_array_kwargs: extra keyword arguments forwarded to `jnp.array`.
        """
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device , Device):
            raise ValueError(
                F'Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
        self.device = device if isinstance(device , str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
                F'device: {str(jax.devices()[0])}.')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        """Map each available device's string identifier to the device object."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self , column):
        """Stack a list of same-shape/same-dtype jax arrays into one batched array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column , list) and column:
            if all(
                isinstance(x , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column , axis=0)
        return column

    def _tensorize(self , value):
        """Convert one scalar / ndarray / PIL image into a `jax.Array` on `self.device`."""
        import jax
        import jax.numpy as jnp

        if isinstance(value , (str, bytes, type(None))):
            return value
        elif isinstance(value , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self , data_struct):
        """Recursively tensorize nested structures; torch tensors go through numpy."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct , torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct , '''__array__''') and not isinstance(data_struct , jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                # recurse into each element (recursing on the whole array would never terminate)
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct , (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self , data_struct: dict):
        """Tensorize every leaf of a (possibly nested) mapping."""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False)

    def format_row(self , pa_table: pa.Table):
        """Format the first row of `pa_table` as a mapping of jax arrays."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self , pa_table: pa.Table):
        """Format the first column of `pa_table` as a (possibly stacked) jax array."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self , pa_table: pa.Table):
        """Format a whole batch as a mapping of stacked jax arrays."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 60 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata

# Delimiter inserted between sentences when reducing them to character lists.
__snake_case :int = ''''''

if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    # Old jiwer: build the char-level transform by hand.
    class _A ( tr.AbstractTransform ):
        """Flatten a list of sentences into a list of characters, inserting the
        configured delimiter between consecutive sentences."""

        def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
            """Store the delimiter used between sentences.

            NOTE(review): `sentence_delimiter` is unbound (the parameter is named
            `__SCREAMING_SNAKE_CASE`) and the result is assigned to a throwaway
            `__a` instead of `self.sentence_delimiter` — mechanically renamed.
            """
            __a = sentence_delimiter

        def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
            """Split one sentence string into its characters."""
            return list(__SCREAMING_SNAKE_CASE)

        def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
            """Flatten sentences into characters, delimiters in between.

            NOTE(review): `chars` is never bound (`__a = []` above) and both
            methods share the name `_lowerCamelCase` — confirm against the
            upstream `cer` metric implementation.
            """
            __a = []
            for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
                chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    # NOTE(review): `SentencesToListOfCharacters` / `SENTENCE_DELIMITER` are not
    # the names actually defined above (`_A` / `__snake_case`) — confirm intent.
    __snake_case :Any = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # Recent jiwer ships the needed reduction transforms directly.
    __snake_case :Optional[int] = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    """Character Error Rate metric backed by `jiwer.compute_measures`.

    NOTE(review): the `_compute`-style method below repeats the parameter name
    `__SCREAMING_SNAKE_CASE` (invalid Python) and forwards its call arguments as
    `truth_transform`/`hypothesis_transform`, where the upstream metric passes
    the char-level transform built above — mechanically renamed; confirm.
    """

    def _lowerCamelCase ( self : Optional[Any]):
        """Declare metric metadata: input features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence'''),
                    '''references''': datasets.Value('''string''' , id='''sequence'''),
                }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )

    def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
        """Compute CER: (S + D + I) / (S + D + C), optionally concatenating texts."""
        if concatenate_texts:
            return jiwer.compute_measures(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
        __a = 0
        __a = 0
        # Accumulate error counts per pair, then divide once at the end.
        for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            __a = jiwer.compute_measures(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 712 |
import argparse
import logging
import pickle
from collections import Counter
# Configure root logging once for this script.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)  # module-level logger
if __name__ == "__main__":
    # Count per-token occurrences in a binarized MLM dataset and pickle the counts.
    # NOTE(review): `parser`, `args`, `data`, `counter` and `counts` are never
    # bound under those names (assignments target `__snake_case`) — this block
    # looks mechanically renamed and does not run as-is; confirm intent.
    __snake_case :Union[str, Any] = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    __snake_case :List[str] = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, '''rb''') as fp:
        __snake_case :Optional[Any] = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    __snake_case :Dict = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    __snake_case :Optional[Any] = [0] * args.vocab_size
    for k, v in counter.items():
        __snake_case :Any = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :int = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many row-wise argmax predictions of ``out`` equal ``labels``.

    Fixes: the original body assigned the argmax to a throwaway name and then
    summed the never-bound names ``outputs``/``labels``; the name is restored
    to match the call site ``accuracy(...)`` in the evaluation loop below.

    Args:
        out: (n, num_classes) array of logits/scores.
        labels: (n,) array of gold class indices.

    Returns:
        Number of correct predictions (numpy integer).
    """
    preds = np.argmax(out, axis=1)
    return np.sum(preds == labels)
def load_rocstories_dataset(dataset_path):
    """Load a ROCStories csv as (story, cont1, cont2, label) tuples.

    Fixes: the original passed the *path string* (instead of the open file
    handle) to ``csv.reader``, ``next`` and the row loop, so it iterated
    characters of the path; name restored to match the call sites
    ``load_rocstories_dataset(args.train_dataset)`` in ``main``.

    Args:
        dataset_path: path to the csv file; the first row is a header and each
            data row has the story in columns 1-4, the two continuations in
            columns 5-6 and a 1-based gold label in the last column.
    """
    with open(dataset_path, encoding='''utf_8''') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line (header)
        for line in tqdm(reader):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack each dataset into (input_ids, mc_token_ids, lm_labels, mc_labels) tensors.

    Each example becomes two candidate sequences
    ``[start] story [delim] cont_k [clf]`` (k in {1, 2}), right-padded to
    ``input_len``; ``mc_token_ids`` points at the classifier token and
    ``lm_labels`` mirrors the tokens with -100 padding (ignored by the LM loss).

    Fixes: the original def repeated its parameter name six times (a
    SyntaxError), unpacked ``(story, conta, conta, mc_label)`` with a duplicate
    name and dropped every indexed assignment target; name restored to match
    the call site ``pre_process_datasets(...)`` in ``main``.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64)
        mc_labels = np.zeros((n_batch,) , dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # index of the classifier token used for the multiple-choice head
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """Fine-tune/evaluate OpenAIGPTDoubleHeadsModel on ROCStories.

    Fixes: the function was named `__snake_case` while the `__main__` guard
    below calls `main()`; renamed so the entry point resolves. The body is
    otherwise preserved verbatim.

    NOTE(review): throughout this body, assignments target the throwaway name
    `__a` while later statements read `parser`, `args`, `device`, `tokenizer`,
    `model`, `train_dataloader`, `optimizer`, `scheduler`, etc., and calls pass
    the unbound `_UpperCAmelCase` — the whole function appears mechanically
    renamed and does not run as-is; confirm against the upstream
    `run_openai_gpt.py` before relying on it.
    """
    # --- argument parsing ---
    __a = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=_UpperCAmelCase , default='''openai-gpt''' , help='''pretrained model name''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''' , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument('''--train_dataset''' , type=_UpperCAmelCase , default='''''' )
    parser.add_argument('''--eval_dataset''' , type=_UpperCAmelCase , default='''''' )
    parser.add_argument('''--seed''' , type=_UpperCAmelCase , default=42 )
    parser.add_argument('''--num_train_epochs''' , type=_UpperCAmelCase , default=3 )
    parser.add_argument('''--train_batch_size''' , type=_UpperCAmelCase , default=8 )
    parser.add_argument('''--eval_batch_size''' , type=_UpperCAmelCase , default=16 )
    parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , type=_UpperCAmelCase , default=1 )
    parser.add_argument(
        '''--max_steps''' , default=-1 , type=_UpperCAmelCase , help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=_UpperCAmelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--learning_rate''' , type=_UpperCAmelCase , default=6.2_5E-5 )
    parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCAmelCase , help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''' , type=_UpperCAmelCase , default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''' , type=_UpperCAmelCase , default=0.01 )
    parser.add_argument('''--lm_coef''' , type=_UpperCAmelCase , default=0.9 )
    parser.add_argument('''--n_valid''' , type=_UpperCAmelCase , default=374 )
    parser.add_argument('''--server_ip''' , type=_UpperCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=_UpperCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
    __a = parser.parse_args()
    print(_UpperCAmelCase )

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCAmelCase )
        ptvsd.wait_for_attach()

    # --- reproducibility & device selection ---
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )

    __a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    __a = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(_UpperCAmelCase , _UpperCAmelCase ) )

    if not args.do_train and not args.do_eval:
        raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )

    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    __a = ['''_start_''', '''_delimiter_''', '''_classify_''']
    __a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_UpperCAmelCase )
    __a = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
    __a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_UpperCAmelCase ) )
    model.to(_UpperCAmelCase )

    # Load and encode the datasets
    def tokenize_and_encode(_UpperCAmelCase ):
        # recursively tokenize strings, pass ints through, map over containers
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCAmelCase ) )
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            return obj
        return [tokenize_and_encode(_UpperCAmelCase ) for o in obj]

    logger.info('''Encoding dataset...''' )
    __a = load_rocstories_dataset(args.train_dataset )
    __a = load_rocstories_dataset(args.eval_dataset )
    __a = (train_dataset, eval_dataset)
    __a = tokenize_and_encode(_UpperCAmelCase )

    # Compute the max input length for the Transformer
    __a = model.config.n_positions // 2 - 2
    __a = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    __a = min(_UpperCAmelCase , model.config.n_positions )  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    __a = pre_process_datasets(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase )
    __a , __a = tensor_datasets[0], tensor_datasets[1]

    __a = TensorDataset(*_UpperCAmelCase )
    __a = RandomSampler(_UpperCAmelCase )
    __a = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.train_batch_size )

    __a = TensorDataset(*_UpperCAmelCase )
    __a = SequentialSampler(_UpperCAmelCase )
    __a = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.eval_batch_size )

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            __a = args.max_steps
            __a = args.max_steps // (len(_UpperCAmelCase ) // args.gradient_accumulation_steps) + 1
        else:
            __a = len(_UpperCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs

        __a = list(model.named_parameters() )
        __a = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        __a = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
        ]
        __a = AdamW(_UpperCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
        __a = get_linear_schedule_with_warmup(
            _UpperCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCAmelCase )

    # --- training loop ---
    if args.do_train:
        __a , __a , __a = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
            __a = 0
            __a = 0
            __a = tqdm(_UpperCAmelCase , desc='''Training''' )
            for step, batch in enumerate(_UpperCAmelCase ):
                __a = tuple(t.to(_UpperCAmelCase ) for t in batch )
                __a , __a , __a , __a = batch
                __a = model(_UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase )
                __a = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                __a = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __a = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCAmelCase , scheduler.get_lr()[0] )

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        __a = model.module if hasattr(_UpperCAmelCase , '''module''' ) else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        __a = os.path.join(args.output_dir , _UpperCAmelCase )
        __a = os.path.join(args.output_dir , _UpperCAmelCase )
        torch.save(model_to_save.state_dict() , _UpperCAmelCase )
        model_to_save.config.to_json_file(_UpperCAmelCase )
        tokenizer.save_vocabulary(args.output_dir )

        # Load a trained model and vocabulary that you have fine-tuned
        __a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        __a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_UpperCAmelCase )

    # --- evaluation loop ---
    if args.do_eval:
        model.eval()
        __a , __a = 0, 0
        __a , __a = 0, 0
        for batch in tqdm(_UpperCAmelCase , desc='''Evaluating''' ):
            __a = tuple(t.to(_UpperCAmelCase ) for t in batch )
            __a , __a , __a , __a = batch
            with torch.no_grad():
                __a , __a , __a , __a = model(
                    _UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase )
            __a = mc_logits.detach().cpu().numpy()
            __a = mc_labels.to('''cpu''' ).numpy()
            __a = accuracy(_UpperCAmelCase , _UpperCAmelCase )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        __a = eval_loss / nb_eval_steps
        __a = eval_accuracy / nb_eval_examples
        __a = tr_loss / nb_tr_steps if args.do_train else None
        __a = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
        __a = os.path.join(args.output_dir , '''eval_results.txt''' )
        with open(_UpperCAmelCase , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key in sorted(result.keys() ):
                logger.info(''' %s = %s''' , _UpperCAmelCase , str(result[key] ) )
                writer.write('''%s = %s\n''' % (key, str(result[key] )) )


if __name__ == "__main__":
    main()
| 713 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
# Hub client used below to enumerate all models tagged ``diffusers``.
__snake_case :List[str] = HfApi()
# NOTE(review): this empty dict was presumably ``results``, mapping model ids to
# the expected output slices below; an automated rename bound every tensor below
# to the same name, so the mapping is lost and the ``results[...]`` lookup in the
# loop at the bottom cannot work as written. TODO: restore distinct names/keys.
__snake_case :str = {}
# fmt: off
# Expected 30-element output slices (``logits[0, 0, 0, :30]``) for each reference
# UNet checkpoint; compared against freshly computed outputs in the loop below.
# NOTE(review): each tensor was presumably keyed into the results dict above
# (one per checkpoint) before an automated rename collapsed the names.
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# For every Google/CompVis diffusers checkpoint: load the local copy of the UNet,
# run one forward pass on a fixed-seed noise sample at timestep 10, and compare a
# slice of the output against the expected values recorded above.
# NOTE(review): an automated rename orphaned most names in this loop —
# ``models`` (loop source), ``local_checkpoint``, ``model``, ``noise``,
# ``time_step``, ``logits`` and ``results`` are all referenced but never bound;
# the assignments that were meant to bind them all target ``__snake_case``.
# TODO: restore the original variable names before running this script.
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Checkpoints are expected to be mirrored locally under this directory.
        __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(f'Started running {mod.modelId}!!!')
        if mod.modelId.startswith('''CompVis'''):
            # CompVis repos nest the UNet weights in a ``unet`` subfolder.
            __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the noise sample (and hence the output slice) is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        __snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            __snake_case :Any = model(noise, time_step).sample
        # The results key is the model id with '/' and '-' folded into underscores.
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(f'{mod.modelId} has passed successfully!!!')
| 60 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _A ( unittest.TestCase ):
    """Unit tests for ``BlipaProcessor`` (BLIP-2 tokenizer + image-processor wrapper).

    NOTE(review): this block looks machine-renamed — every local is bound to
    ``__a`` and then never read, several referenced names (``processor``,
    ``tokenizer_add_kwargs``, ``image_processor_add_kwargs``, ``prompt``,
    ``encoded_tok``, ``encoded_processor``, ``inputs`` ...) are undefined, and
    every test method shares the name ``_lowerCamelCase`` so only the last
    definition survives under unittest. TODO: restore the intended names
    before relying on these tests.
    """
    def _lowerCamelCase ( self : str):
        '''setUp: create a temp dir and save a BlipaProcessor fixture into it.'''
        __a = tempfile.mkdtemp()
        __a = BlipImageProcessor()
        __a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
        __a = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        processor.save_pretrained(self.tmpdirname)
    def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''Reload the saved fixture via AutoProcessor and return its tokenizer (kwargs forwarded).'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).tokenizer
    def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : str):
        '''Reload the saved fixture via AutoProcessor and return its image processor (kwargs forwarded).'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).image_processor
    def _lowerCamelCase ( self : Optional[Any]):
        '''tearDown: delete the temp fixture directory.'''
        shutil.rmtree(self.tmpdirname)
    def _lowerCamelCase ( self : Any):
        '''Return a list with one random 30x400 RGB PIL image for processor input tests.'''
        __a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
        __a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs]
        return image_inputs
    def _lowerCamelCase ( self : List[str]):
        '''Save/load round-trip: extra kwargs passed to from_pretrained must be applied to both components.'''
        __a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        __a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
        __a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)
        __a = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Any):
        '''Image path parity: processor(images=...) must match the bare image processor output.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()
        __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        __a = self.prepare_image_inputs()
        __a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''')
        __a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
    def _lowerCamelCase ( self : List[str]):
        '''Text path parity: processor(text=...) must match the bare tokenizer output.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()
        __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        __a = '''lower newer'''
        __a = processor(text=__SCREAMING_SNAKE_CASE)
        __a = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def _lowerCamelCase ( self : Union[str, Any]):
        '''Combined text+image call returns the expected keys; calling with no input raises.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()
        __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        __a = '''lower newer'''
        __a = self.prepare_image_inputs()
        __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)
        self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask'''])
        # test if it raises when no input is passed
        with pytest.raises(__SCREAMING_SNAKE_CASE):
            processor()
    def _lowerCamelCase ( self : Any):
        '''batch_decode must delegate to the tokenizer's batch_decode.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()
        __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __a = processor.batch_decode(__SCREAMING_SNAKE_CASE)
        __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any]):
        '''The processor's model-input names are exactly pixel_values/input_ids/attention_mask.'''
        __a = self.get_image_processor()
        __a = self.get_tokenizer()
        __a = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
        __a = '''lower newer'''
        __a = self.prepare_image_inputs()
        __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''input_ids''', '''attention_mask'''])
| 714 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
__a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __snake_case ( _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
__a = format(_UpperCAmelCase , '''08x''' )[-8:]
__a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __snake_case ( _UpperCAmelCase ):
__a = b''''''
for char in message:
bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' )
__a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
__a = bit_string[pos : pos + 512]
__a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
__a = format(_UpperCAmelCase , '''032b''' )
__a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCAmelCase , 2 )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return (a + b) % 2**32
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __snake_case ( _UpperCAmelCase ):
__a = preprocess(_UpperCAmelCase )
__a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__a = 0X67_452_301
__a = 0Xef_cda_b89
__a = 0X98_bad_cfe
__a = 0X10_325_476
__a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCAmelCase ):
__a = aa
__a = ba
__a = ca
__a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__a = d ^ (b & (c ^ d))
__a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__a = c ^ (d & (b ^ c))
__a = (5 * i + 1) % 16
elif i <= 47:
__a = b ^ c ^ d
__a = (3 * i + 5) % 16
else:
__a = c ^ (b | not_aa(_UpperCAmelCase ))
__a = (7 * i) % 16
__a = (f + a + added_consts[i] + block_words[g]) % 2**32
__a = d
__a = c
__a = b
__a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase )
return digest
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 60 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
__snake_case :Any = logging.get_logger(__name__)
# Canonical config URLs for released Pix2Struct checkpoints.
__snake_case :Optional[int] = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class _A ( PretrainedConfig ):
    """Configuration for the Pix2Struct text (decoder) model.

    BUG FIXES: the base class referenced the undefined ``__UpperCAmelCase``
    (``PretrainedConfig`` is imported above); the three class attributes all
    shared the name ``UpperCamelCase__`` (only the last survived); every
    ``__init__`` parameter shared the name ``__SCREAMING_SNAKE_CASE`` — a
    SyntaxError; and the attribute assignments were collapsed into a throwaway
    local. Parameter names are reconstructed from the attribute names the body
    assigns, in the original default order.
    """

    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text config, unwrapping it from a composite pix2struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''') == "pix2struct":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class _A ( PretrainedConfig ):
    """Configuration for the Pix2Struct vision (patch-encoder) model.

    BUG FIXES: base class restored to the imported ``PretrainedConfig`` (the
    previous ``__UpperCAmelCase`` is undefined); every ``__init__`` parameter
    shared the name ``__SCREAMING_SNAKE_CASE`` — a SyntaxError; attribute
    assignments were collapsed into a throwaway local. Parameter names are
    reconstructed from the attributes the body assigns, in the original
    default order.
    """

    model_type = '''pix2struct_vision_model'''

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1E-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1E-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping it from a composite pix2struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''') == "pix2struct":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class _A ( PretrainedConfig ):
    """Composite Pix2Struct configuration holding a text and a vision sub-config.

    BUG FIXES: base class restored to the imported ``PretrainedConfig``; the
    two class attributes shared one name; every ``__init__`` parameter shared
    the name ``__SCREAMING_SNAKE_CASE`` — a SyntaxError; the two methods both
    named ``_lowerCamelCase`` shadowed each other and are given their intended
    names; instance-attribute assignments were collapsed into a throwaway local.
    NOTE(review): ``PixaStructTextConfig``/``PixaStructVisionConfig`` are kept
    exactly as the source referenced them, but those classes are named ``_A``
    in this file — restore the class names before using this module.
    """

    model_type = '''pix2struct'''
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        # Token ids are sourced from the text sub-config so generation defaults agree.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Propagate the composite initializer range into both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 715 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
# Seed python/numpy/torch so the smoke tests are reproducible.
set_seed(42)
# Tiny wav2vec2 checkpoints used for the DeepSpeed smoke tests.
__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
# ZeRO stage tags used to parametrize the tests.
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
# NOTE(review): ``ZEROa`` is undefined — the two constants above were
# presumably ``ZERO2``/``ZERO3`` before an automated rename collapsed them;
# TODO restore distinct names.
__snake_case :str = [ZEROa, ZEROa]
def __snake_case ( func , param_num , param ):
    """Build a readable sub-test name for ``parameterized.expand``.

    Args:
        func: the decorated test function.
        param_num: index of the parameter set (unused, required by the API).
        param: the ``param`` object whose ``args`` feed the name.

    BUG FIXES: all three parameters previously shared one name — a
    SyntaxError — and the join stringified the wrong variable; the generator
    expression now converts each element of ``param.args``.
    """
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
# NOTE(review): ``stages`` and ``models`` are undefined — they were presumably
# the constants defined above before an automated rename; TODO restore.
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
    """End-to-end DeepSpeed ZeRO smoke tests for wav2vec2 ASR fine-tuning.

    NOTE(review): machine-renamed block — the base class ``__UpperCAmelCase``
    is undefined (presumably ``TestCasePlus``), the ``@parameterized.expand``
    arguments and many locals are undefined, and every method shares the name
    ``_lowerCamelCase`` so only the last definition survives. TODO: restore
    the intended names before relying on these tests.
    """
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
        '''Parameterized smoke test: fine-tune once for the given (ZeRO stage, model).'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''Multi-GPU variant of the parameterized fine-tuning smoke test.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
        '''Parameterized smoke test (second precision variant; flags lost to the rename).'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
    @require_torch_multi_gpu
    @parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''Multi-GPU variant of the second precision smoke test.'''
        self.run_and_check(
            stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
    def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
        '''Post-run validation hook; intentionally a no-op placeholder.'''
        pass
    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''Resolve the model id, run the trainer for one epoch, then run checks on the output dir.'''
        __a = models[model]
        __a = self.run_trainer(
            stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
        self.do_checks(__SCREAMING_SNAKE_CASE)
        return output_dir
    def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
        '''Launch run_asr.py under the deepspeed launcher with the per-stage config and return the output dir.'''
        __a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
        __a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fpaa:
            args.extend(['''--fp16'''])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        __a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        __a = self.get_launcher(__SCREAMING_SNAKE_CASE)
        __a = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
        return output_dir
    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
        '''Build the deepspeed launcher command (2 GPUs when distributed and available, else 1).'''
        __a = min(2 , get_gpu_count()) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 60 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make CUDA/cuDNN kernels deterministic so expected-slice assertions are stable.
enable_full_determinism()
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Any = AudioLDMPipeline
UpperCamelCase__ : Tuple = TEXT_TO_AUDIO_PARAMS
UpperCamelCase__ : str = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCamelCase__ : Any = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _lowerCamelCase ( self : Any):
'''simple docstring'''
torch.manual_seed(0)
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__SCREAMING_SNAKE_CASE , )
__a = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0)
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
__a = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
__a = ClapTextModelWithProjection(__SCREAMING_SNAKE_CASE)
__a = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77)
__a = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__SCREAMING_SNAKE_CASE , )
__a = SpeechTaHifiGan(__SCREAMING_SNAKE_CASE)
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=0):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE).startswith('''mps'''):
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
else:
__a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE)
__a = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert audio.ndim == 1
assert len(__SCREAMING_SNAKE_CASE) == 256
__a = audio[:10]
__a = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.get_dummy_components()
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = 3 * [inputs['''prompt''']]
# forward
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = 3 * [inputs.pop('''prompt''')]
__a = audioldm_pipe.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__a = text_inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.text_encoder(
__SCREAMING_SNAKE_CASE , )
__a = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__a = F.normalize(__SCREAMING_SNAKE_CASE , dim=-1)
__a = prompt_embeds
# forward
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.get_dummy_components()
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = 3 * ['''this is a negative prompt''']
__a = negative_prompt
__a = 3 * [inputs['''prompt''']]
# forward
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = 3 * [inputs.pop('''prompt''')]
__a = []
for p in [prompt, negative_prompt]:
__a = audioldm_pipe.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__a = text_inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.text_encoder(
__SCREAMING_SNAKE_CASE , )
__a = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__a = F.normalize(__SCREAMING_SNAKE_CASE , dim=-1)
embeds.append(__SCREAMING_SNAKE_CASE)
__a , __a = embeds
# forward
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE)
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = '''egg cracking'''
__a = audioldm_pipe(**__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert audio.ndim == 1
assert len(__SCREAMING_SNAKE_CASE) == 256
__a = audio[:10]
__a = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE)
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
__a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__a = 2
__a = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__a = 2
__a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=2 , num_waveforms_per_prompt=__SCREAMING_SNAKE_CASE).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__a = 2
__a = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__SCREAMING_SNAKE_CASE).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.vocoder.config.sampling_rate
__a = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe(audio_length_in_s=0.0_16 , **__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert audio.ndim == 1
assert len(__SCREAMING_SNAKE_CASE) / vocoder_sampling_rate == 0.0_16
__a = audioldm_pipe(audio_length_in_s=0.0_32 , **__SCREAMING_SNAKE_CASE)
__a = output.audios[0]
assert audio.ndim == 1
assert len(__SCREAMING_SNAKE_CASE) / vocoder_sampling_rate == 0.0_32
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.get_dummy_components()
__a = AudioLDMPipeline(**__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe.to(__SCREAMING_SNAKE_CASE)
audioldm_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = ['''hey''']
__a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=1)
__a = output.audios.shape
assert audio_shape == (1, 256)
__a = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__a = SpeechTaHifiGan(__SCREAMING_SNAKE_CASE).to(__SCREAMING_SNAKE_CASE)
__a = audioldm_pipe(__SCREAMING_SNAKE_CASE , num_inference_steps=1)
__a = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCamelCase ( self : Any):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE)
@slow
class _A(unittest.TestCase):
    """Slow integration tests against the pretrained ``cvssp/audioldm``
    checkpoint.

    NOTE(review): the original class defined three methods under the single
    name ``_lowerCamelCase`` (so only the last survived and unittest never
    saw a tearDown), and ``get_inputs`` declared four parameters under one
    name (a SyntaxError). Conventional names are restored.
    """

    def tearDown(self):
        # free CUDA memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic latents + call kwargs for the full pipeline.

        NOTE(review): the original dtype default was the undefined
        ``torch.floataa``; float32 is assumed — TODO confirm.
        """
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        """Default (DDIM-style) scheduler, 25 steps, waveform slice check."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        """LMS scheduler swap, waveform slice check."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[27_780:27_790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 716 |
def __snake_case(input_str, use_pascal=False):
    """Convert a snake_case string to camelCase (or PascalCase).

    :param input_str: the snake_case string to convert
    :param use_pascal: when True, also capitalize the first word (PascalCase)
    :raises ValueError: if either argument has the wrong type

    Bug fixes: the original declared both parameters under one name (a
    SyntaxError) and type-checked with ``isinstance(x, x)``; restored to
    ``isinstance(input_str, str)`` / ``isinstance(use_pascal, bool)``.
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    # camelCase keeps the first word lower-cased; PascalCase capitalizes it too
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import re
from filelock import FileLock
try:
import nltk
__snake_case :List[Any] = True
except (ImportError, ModuleNotFoundError):
__snake_case :Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __snake_case(_UpperCAmelCase):
    """Re-split the input text into one sentence per line using NLTK.

    Bug fix: the original discarded the result of ``re.sub`` (strings are
    immutable), so the pegasus ``<n>`` markers were never removed.
    """
    _UpperCAmelCase = re.sub('''<n>''', '''''', _UpperCAmelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCAmelCase))
| 717 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _A:
    """Dataset version info in ``major.minor.patch`` form.

    On init, ``version_str`` (e.g. ``"1.2.0"``) is parsed into the integer
    ``major`` / ``minor`` / ``patch`` attributes.

    NOTE(review): the original declared all five dataclass fields under one
    name, never assigned the parsed components, and called module helpers
    that do not exist under those names; the fields and the parsing are
    restored here so the class is self-contained.
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    # Compiled locally so the class does not depend on a module-level helper.
    _VERSION_RE = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")

    def __post_init__(self):
        """Parse ``version_str`` into the three integer components."""
        res = self._VERSION_RE.match(self.version_str)
        if not res:
            raise ValueError(
                f"Invalid version '{self.version_str}'. Format should be x.y.z with {{x,y,z}} being digits."
            )
        self.major, self.minor, self.patch = (
            int(res.group("major")),
            int(res.group("minor")),
            int(res.group("patch")),
        )

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        """The version as a ``(major, minor, patch)`` tuple."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """Coerce *other* (str or version) for comparison; raise otherwise."""
        if isinstance(other, str):
            return _A(other)
        elif isinstance(other, _A):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            # non-comparable operands are simply unequal
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(".".join(str(v) for v in self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a version from a dict, ignoring unknown keys."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str
def __snake_case(_UpperCAmelCase):
    """Parse an ``"x.y.z"`` version string into a tuple of three ints.

    Bug fixes: the original converted the whole input string for every
    component (``int(whole)`` instead of ``int(v)``) and the error message
    referenced an undefined name.
    NOTE(review): ``_VERSION_REG`` is expected to be the compiled pattern at
    the top of this module (its binding lost this name) — confirm.
    """
    res = _VERSION_REG.match(_UpperCAmelCase)
    if not res:
        raise ValueError(f"Invalid version '{_UpperCAmelCase}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group('''major'''), res.group('''minor'''), res.group('''patch''')])
def __snake_case(version_tuple):
    """Join a version tuple back into an ``"x.y.z"`` string.

    Bug fix: the original stringified its (misnamed) parameter for every
    element instead of each component ``v``, and the body referenced the
    undefined name ``version_tuple`` — the parameter name is restored.
    """
    return ".".join(str(v) for v in version_tuple)
| 60 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __snake_case(method):
    """Decorator: run the accelerate pre-forward hook (device placement for
    offloaded weights) before *method*, when accelerate >= 0.17.0 is present.

    Bug fixes: the original returned the undefined name ``method``, parsed
    the wrong variable in the version comparison, and declared the wrapper's
    ``*args`` / ``**kwargs`` under one name (a SyntaxError).
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        # hook API only exists from 0.17.0 onwards
        return method

    def wrapper(self, *args, **kwargs):
        # move offloaded weights back onto the device before the real call
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 718 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences when flattening text to characters.
__snake_case :int = ''''''
# jiwer < 2.3.0 has no ReduceToListOfListOfChars, so emulate it with a
# custom transform; newer versions use the built-in reducers below.
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class _A ( tr.AbstractTransform ):
        # NOTE(review): the constructor binds the delimiter to a local
        # instead of `self.sentence_delimiter`, and both methods below share
        # one name (the second shadows the first) — this class cannot work
        # as written; it also references `self.process_string`, which is
        # not defined under that name here.
        def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
            '''Store the delimiter placed between tokenized sentences.'''
            __a = sentence_delimiter
        def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
            '''Split one sentence string into a list of characters.'''
            return list(__SCREAMING_SNAKE_CASE)
        def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
            '''Flatten a list of sentences into one delimiter-separated character list.'''
            __a = []
            for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
                chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    __snake_case :Any = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    __snake_case :Optional[int] = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    """Character Error Rate (CER): character-level edit distance metric.

    NOTE(review): both methods were previously defined under one name (so
    the second shadowed the first and `datasets` could never find `_info`),
    and `_compute` declared three parameters under one name (a SyntaxError);
    the standard `datasets.Metric` hook names are restored.
    """

    def _info(self):
        """Declare the metric's input features and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence'''),
                    '''references''': datasets.Value('''string''' , id='''sequence'''),
                }) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return the CER of *predictions* against *references*.

        NOTE(review): `cer_transform` must be the module-level character
        transform defined above (its binding lost this name) — confirm.
        """
        if concatenate_texts:
            # Score everything as one sequence; "wer" is jiwer's key for the
            # rate, which with character transforms is effectively the CER.
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 60 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def __snake_case(img):
    """Invert the colors of *img* in place and return it.

    Bug fix: the original assigned the inverted pixel to a throwaway local
    instead of back into ``img[i][j]``, so the image was returned unchanged.
    """
    # getting number of pixels in the image
    rows, cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows):
        for j in range(cols):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    # NOTE(review): `cva` in the import above looks like a mangled `cv2`
    # (OpenCV) module name — confirm.
    __snake_case :int = imread('''image_data/lena.jpg''', 1)
    # NOTE(review): the lines below reference `img`, but the image above was
    # bound to `__snake_case` — this script will NameError as written, and
    # `convert_to_negative` is not defined under that name here.
    # convert to its negative
    __snake_case :Any = convert_to_negative(img)
    # show result image
    imshow('''negative of original image''', img)
    waitKey(0)
    destroyAllWindows()
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''ViTFeatureExtractor''']
__snake_case :Optional[Any] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 0 |
'''simple docstring'''
from math import pi


def __snake_case(radius, angle):
    """Return the length of the arc subtended by *angle* degrees on a
    circle of the given *radius*.

    Bug fix: the original declared both parameters under one name (a
    SyntaxError) while the body already used ``radius`` / ``angle``.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    # Bug fix: the demo called the undefined name `arc_length`.
    print(__snake_case(90, 10))
| 720 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3 (SentencePiece with byte fallback).

    NOTE(review): the original reused one obfuscated name for the mixin base
    class, all four class attributes and most methods, so definitions
    shadowed each other; the conventional TokenizerTesterMixin names are
    restored. SAMPLE_VOCAB is assumed to be the fixture path defined at
    module level (its binding lost this name) — confirm.
    """

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token='''<unk>''', bos_token='''<unk>''', pad_token='''<unk>''')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = '''This is a test'''
        output_text = '''This is a test'''
        return input_text, output_text

    def test_convert_token_and_id(self):
        """`<s>` maps to id 1 and back again."""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)

    def test_full_tokenizer(self):
        """Round-trip tokens <-> ids, including byte-fallback pieces."""
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        # fmt: off
        self.assertListEqual(
            tokens, ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
            '''Hey there, how are you doing this fine day?''',
            '''This is a text with a trailing spaces followed by a dot .''',
            '''Häj sväjs lillebrör! =)''',
            '''Det är inget fel på Mr. Cool''',
        ]
        # fmt: off
        expected_encoding = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''AI-Sweden/gpt-sw3-126m''', sequences=sequences, )
| 60 | 0 |
# Shell/pip setup cell injected at the top of every generated notebook.
# NOTE(review): all three constants below rebind `__snake_case`; the second
# one also references `INSTALL_CONTENT`, which is not defined under that
# name here — downstream code presumably expected distinct constants
# (INSTALL_CONTENT, notebook cells, doc placeholder map). Confirm.
__snake_case :Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
# First notebook cell: the installation snippet above.
__snake_case :int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> dummy class name substitutions used when rendering docs.
__snake_case :Optional[int] = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 721 |
from __future__ import annotations
__snake_case :Optional[Any] = []
def __snake_case(board, row, column):
    """Return True if a queen placed at (row, column) is not attacked by any
    queen already on *board* (same row, same column, or either upper
    diagonal — rows below are not yet filled).

    Bug fix: the original declared all three parameters under one name (a
    SyntaxError); the names are restored from the body's references.
    """
    # same row
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # same column
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def __snake_case(board, row):
    """Backtracking N-queens solver: try a queen in every column of *row*
    and recurse into the next row; completed boards are appended to the
    module-level ``solution`` list and printed via ``printboard``.

    Bug fixes: the original declared both parameters under one name (a
    SyntaxError), assigned the queen placement/removal to a throwaway local
    instead of ``board[row][i]``, and recursed through the undefined name
    ``solve`` — the recursion now targets this function itself.
    """
    if row >= len(board):
        # every row filled: record and display this solution
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place queen, explore, then backtrack
            __snake_case(board, row + 1)
            board[row][i] = 0
    return False
def __snake_case(board):
    """Pretty-print *board*: 'Q' for a queen (cell == 1), '.' otherwise.

    Bug fix: the original's body referenced the undefined name ``board``
    while its parameter had been renamed — the parameter name is restored.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()
# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 60 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs and runs shape assertions for the test suite.

    NOTE(review): reconstructed from a mangled block — the original declared
    duplicate ``__a`` parameters (a SyntaxError), bound every value to a
    throwaway local ``__snake_case`` instead of instance attributes, and named
    every method ``SCREAMING_SNAKE_CASE__``.  The sibling test classes in this
    file call ``FocalNetModelTester(self)`` and then use
    ``prepare_config_and_inputs`` / ``create_and_check_*`` and attributes such
    as ``self.model_tester.depths``, so those names are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],  # mutable defaults kept: mirrors upstream tester style, instances never mutate them
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        # Store every hyperparameter on the instance: the mixin-based test
        # classes read them through self.model_tester.<name>.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small :class:`FocalNetConfig` from the tester's hyperparameters."""
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # BUG FIX: was ``path_norm=`` (typo) — the config keyword is
            # ``patch_norm``; the misspelling was silently swallowed by
            # PretrainedConfig **kwargs and the real flag kept its default.
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        # torch_device is expected from transformers.testing_utils, imported
        # above this excerpt — TODO confirm.
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Patch grid shrinks by 2x per stage after the first.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature maps/channels for explicit out_features and for out_features=None."""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None (defaults to last stage only)
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shape, including the greyscale (1-channel) path."""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shape, including the greyscale path."""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # Model + pipeline test suite for FocalNet, driven by the two mixins named
    # ``UpperCamelCase_`` in this mangled excerpt (presumably ModelTesterMixin
    # and PipelineTesterMixin — TODO confirm upstream).
    # NOTE(review): every class attribute below is assigned to the same name
    # ``_snake_case``, so each assignment overwrites the previous one; the
    # unmangled source presumably used distinct names (all_model_classes,
    # pipeline_model_mapping, fx_compatible, test_pruning, ... — verify).
    _snake_case = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    _snake_case = (
        {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Create the shared model tester and a ConfigTester for the config class."""
        # NOTE(review): results are bound to throwaway locals — presumably
        # self.model_tester / self.config_tester before mangling; ``__a`` is
        # an undefined name in this excerpt (likely FocalNetConfig / False).
        __snake_case : List[Any] = FocalNetModelTester(self)
        __snake_case : List[Any] = ConfigTester(self , config_class=__a , embed_dim=3_7 , has_text_modality=__a)

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Run the standard ConfigTester serialization/round-trip battery."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Intentionally empty: common-properties check is covered elsewhere."""
        return

    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Exercise the base-model shape check from the model tester."""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): ``__a`` below is undefined here — presumably the
        # config_and_inputs tuple prepared on the previous line.
        self.model_tester.create_and_check_model(*__a)

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Exercise the backbone feature-map/channel checks."""
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a)

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """Exercise the masked-image-modeling reconstruction checks."""
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a)

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Exercise the image-classification logit checks."""
        __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a)

    @unittest.skip(reason='FocalNet does not use inputs_embeds')
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Skipped: FocalNet consumes pixel_values, not inputs_embeds."""
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking')
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Skipped: feedforward chunking is not implemented for FocalNet."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Check input/output embedding accessors on every class except the backbone."""
        __snake_case ,__snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()

        # [:-1] excludes FocalNetBackbone (last in the tuple), which has no
        # output embeddings.
        for model_class in self.all_model_classes[:-1]:
            __snake_case : Optional[Any] = model_class(__a)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            __snake_case : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__a , nn.Linear))

    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Check that every forward() signature starts with ``pixel_values``."""
        __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            __snake_case : Any = model_class(__a)
            __snake_case : Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Dict = [*signature.parameters.keys()]

            __snake_case : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a) -> Optional[int]:
        """Helper: run a forward pass and verify hidden_states / reshaped_hidden_states shapes."""
        __snake_case : str = model_class(__a)
        model.to(__a)
        model.eval()

        with torch.no_grad():
            __snake_case : int = model(**self._prepare_for_class(__a , __a))

        __snake_case : int = outputs.hidden_states

        # +1 accounts for the initial patch-embedding output in addition to
        # one hidden state per stage.
        __snake_case : List[Any] = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
        self.assertEqual(len(__a) , __a)

        # FocalNet has a different seq_length
        __snake_case : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        __snake_case : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )

        __snake_case : Tuple = outputs.reshaped_hidden_states
        self.assertEqual(len(__a) , __a)

        # NOTE(review): the four-name annotated tuple unpack below is a
        # SyntaxError as written (annotations allow a single target only).
        __snake_case ,__snake_case ,__snake_case ,__snake_case : int = reshaped_hidden_states[0].shape
        __snake_case : Dict = (
            reshaped_hidden_states[0].view(__a , __a , height * width).permute(0 , 2 , 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Verify hidden-state outputs both via kwarg and via config flag."""
        __snake_case ,__snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()

        __snake_case : Tuple = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        # [:-1] excludes the backbone, which reports feature maps instead.
        for model_class in self.all_model_classes[:-1]:
            __snake_case : Union[str, Any] = True
            self.check_hidden_states_output(__a , __a , __a , __a)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : str = True

            self.check_hidden_states_output(__a , __a , __a , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """Same hidden-state checks with an input that needs padding to a patch multiple."""
        __snake_case ,__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        __snake_case : Any = 3
        __snake_case : str = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __snake_case : Any = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        # Pad each dimension up past the next multiple of the patch size.
        __snake_case : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __snake_case : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            __snake_case : Any = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : str = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Optional[Any] = FocalNetModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """With zero-init config, non-embedding params must be exactly 0.0 or 1.0."""
        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : List[str] = _config_zero_init(__a)
        for model_class in self.all_model_classes:
            __snake_case : str = model_class(config=__a)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class a_(unittest.TestCase):
    """Integration test: run the pretrained ``microsoft/focalnet-tiny`` classifier
    on a fixture COCO image and pin the logits.

    NOTE(review): method names restored from the mangled
    ``SCREAMING_SNAKE_CASE__`` placeholders — the body itself reads
    ``self.default_image_processor``, and unittest only collects ``test_*``
    methods.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the checkpoint, or None when vision deps are missing."""
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Check logits shape, a slice of values, and the argmax class (281 = tabby cat)."""
        # torch_device is expected from transformers.testing_utils, imported
        # above this excerpt — TODO confirm.
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # BUG FIX: was ``assertTrue(value, 281)`` — assertTrue ignores its
        # second argument (it is the failure message), so the check could
        # never fail; the intent is an equality assertion.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class a_(UpperCamelCase_, unittest.TestCase):
    """Backbone-specific checks for FocalNetBackbone; the BackboneTesterMixin
    (``UpperCamelCase_`` in this mangled excerpt) drives the actual tests.

    NOTE(review): the original assigned all three class attributes to the same
    name ``_snake_case`` (each overwriting the previous); restored to the
    names the backbone test mixin conventionally reads — verify upstream.
    """

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        """Expose the shared model tester for the mixin."""
        # BUG FIX: the tester was bound to a throwaway local, so the mixin's
        # ``self.model_tester`` lookup would fail with AttributeError.
        self.model_tester = FocalNetModelTester(self)
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds kwargs and sample image batches for ChineseCLIP image-processor tests.

    NOTE(review): reconstructed from a mangled block — the original bound all
    ``__init__`` values to a throwaway local, declared duplicate ``__a``
    parameters (a SyntaxError), appended to an undefined ``image_inputs``,
    and used the nonexistent dtype ``np.uinta``.  Callers in this file use
    ``ChineseCLIPImageProcessingTester(self, ...)``,
    ``prepare_image_processor_dict()`` and ``prepare_inputs(...)``, so those
    names are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # Fall back to the processor's stock defaults when not overridden.
        size = size if size is not None else {'height': 224, 'width': 224}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Return a batch of random images as PIL images (default), numpy arrays, or torch tensors.

        With ``equal_resolution`` every image is max_resolution square;
        otherwise each image gets a random resolution in
        [min_resolution, max_resolution).
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        image_inputs = []
        if equal_resolution:
            for i in range(self.batch_size):
                image_inputs.append(
                    # BUG FIX: dtype was ``np.uinta`` (undefined attribute).
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    # Image-processor test suite (3-channel inputs); the mixin named
    # ``UpperCamelCase_`` supplies save/load tests.
    # NOTE(review): the attribute below was presumably ``image_processing_class``
    # before mangling — the methods read self.image_processing_class.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Create the tester with center-cropping enabled."""
        # NOTE(review): bound to a throwaway local — presumably
        # self.image_processor_tester; ``__a`` is undefined here (likely True).
        __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a)

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """The processor must expose all of its configuration attributes."""
        __snake_case : int = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a , 'do_resize'))
        self.assertTrue(hasattr(__a , 'size'))
        self.assertTrue(hasattr(__a , 'do_center_crop'))
        self.assertTrue(hasattr(__a , 'center_crop'))
        self.assertTrue(hasattr(__a , 'do_normalize'))
        self.assertTrue(hasattr(__a , 'image_mean'))
        self.assertTrue(hasattr(__a , 'image_std'))
        self.assertTrue(hasattr(__a , 'do_convert_rgb'))

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """from_dict must honor defaults and allow size/crop_size overrides."""
        __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4})
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8})

        # Integer overrides are normalized into the dict forms below.
        __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2})
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4})

    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Intentionally empty placeholder (overrides a mixin test)."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """PIL inputs: check unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image)

        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Numpy inputs: check unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray)

        # Test not batched input
        __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """Torch inputs: check unbatched and batched pixel_values shapes."""
        # Initialize image_processing
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor)

        # Test not batched input
        __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    # Variant suite feeding 4-channel (e.g. RGBA) inputs; the processor is
    # expected to emit 3-channel pixel_values (see
    # self.expected_encoded_image_num_channels below).
    # NOTE(review): attribute presumably ``image_processing_class`` before mangling.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Create a 4-channel tester; processor should convert down to 3 channels."""
        # NOTE(review): locals presumably self.image_processor_tester and
        # self.expected_encoded_image_num_channels before mangling; ``__a``
        # is undefined here (likely True).
        __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a)
        __snake_case : List[Any] = 3

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """The processor must expose all of its configuration attributes."""
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a , 'do_resize'))
        self.assertTrue(hasattr(__a , 'size'))
        self.assertTrue(hasattr(__a , 'do_center_crop'))
        self.assertTrue(hasattr(__a , 'center_crop'))
        self.assertTrue(hasattr(__a , 'do_normalize'))
        self.assertTrue(hasattr(__a , 'image_mean'))
        self.assertTrue(hasattr(__a , 'image_std'))
        self.assertTrue(hasattr(__a , 'do_convert_rgb'))

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Intentionally empty placeholder (overrides a mixin test)."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """4-channel PIL inputs must come out with 3 channels after RGB conversion."""
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image)

        # Test not batched input
        __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE(A: List[str]) -> List[List[ImageInput]]:
    """Normalize *A* into a batch of videos (a list of lists of frames).

    Accepts a single image, a flat list of frames, or an already-batched
    list of frame lists, and always returns the batched form.

    Raises:
        ValueError: if *A* cannot be interpreted as video frames.
    """
    # BUG FIX: the body referenced an undefined name ``videos`` while the
    # parameter is named ``A``; every branch (and the error message) now uses
    # the parameter.
    if isinstance(A, (list, tuple)) and isinstance(A[0], (list, tuple)) and is_valid_image(A[0][0]):
        # Already batched: list of videos, each a list of frames.
        return A

    elif isinstance(A, (list, tuple)) and is_valid_image(A[0]):
        # A single video (flat list of frames) -> wrap once.
        return [A]

    elif is_valid_image(A):
        # A single frame -> wrap twice.
        return [[A]]

    raise ValueError(f"Could not make batched video from {A}")
class a_ ( UpperCamelCase_ ):
    # Video image processor (resize -> center-crop -> rescale -> normalize per
    # frame, batched per video); base class ``UpperCamelCase_`` is presumably
    # BaseImageProcessor (imported above) — TODO confirm.
    # NOTE(review): this block is mangled — ``__init__``/method signatures
    # declare duplicate ``__a`` parameters (a SyntaxError) while the bodies
    # read the original parameter names (size, crop_size, do_resize, ...),
    # and results are bound to throwaway ``__snake_case`` locals where
    # ``self.*`` attributes are read elsewhere.
    _snake_case = ["""pixel_values"""]

    def __init__(self , __a = True , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = None , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None:
        """Store preprocessing flags; size defaults to shortest_edge=224 and
        crop_size to 224x224; mean/std default to the ImageNet constants."""
        super().__init__(**__a)
        __snake_case : Tuple = size if size is not None else {'shortest_edge': 2_2_4}
        __snake_case : str = get_size_dict(__a , default_to_square=__a)
        __snake_case : List[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        __snake_case : Optional[Any] = get_size_dict(__a , param_name='crop_size')
        __snake_case : Any = do_resize
        __snake_case : List[str] = size
        __snake_case : List[Any] = do_center_crop
        __snake_case : Tuple = crop_size
        __snake_case : Dict = resample
        __snake_case : List[Any] = do_rescale
        __snake_case : Union[str, Any] = rescale_factor
        __snake_case : Any = do_normalize
        __snake_case : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __snake_case : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
        """Resize one frame; size may be {shortest_edge} or {height, width}."""
        __snake_case : Optional[int] = get_size_dict(__a , default_to_square=__a)
        if "shortest_edge" in size:
            __snake_case : Any = get_resize_output_image_size(__a , size['shortest_edge'] , default_to_square=__a)
        elif "height" in size and "width" in size:
            __snake_case : Dict = (size['height'], size['width'])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(__a , size=__a , resample=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> np.ndarray:
        """Center-crop one frame to size['height'] x size['width']."""
        __snake_case : List[Any] = get_size_dict(__a)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any:
        """Rescale pixel values by the given factor (e.g. 1/255)."""
        return rescale(__a , scale=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , ) -> np.ndarray:
        """Apply the configured transform pipeline to a single frame."""
        # NOTE(review): the first condition parses as
        # ``(do_resize and size is None) or (resample is None)`` — presumably
        # ``do_resize and (size is None or resample is None)`` was intended;
        # verify against the unmangled source before relying on it.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        __snake_case : Any = to_numpy_array(__a)

        if do_resize:
            __snake_case : Optional[Any] = self.resize(image=__a , size=__a , resample=__a)

        if do_center_crop:
            __snake_case : Optional[Any] = self.center_crop(__a , size=__a)

        if do_rescale:
            __snake_case : List[str] = self.rescale(image=__a , scale=__a)

        if do_normalize:
            __snake_case : Tuple = self.normalize(image=__a , mean=__a , std=__a)

        __snake_case : List[str] = to_channel_dimension_format(__a , __a)

        return image

    def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image:
        """Preprocess a batch of videos: per-call overrides fall back to the
        instance defaults, each video is processed frame by frame, and the
        result is returned as a BatchFeature with key 'pixel_values'."""
        __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize
        __snake_case : List[Any] = resample if resample is not None else self.resample
        __snake_case : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        __snake_case : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        __snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        __snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
        __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std
        __snake_case : Union[str, Any] = size if size is not None else self.size
        __snake_case : Tuple = get_size_dict(__a , default_to_square=__a)
        __snake_case : Dict = crop_size if crop_size is not None else self.crop_size
        __snake_case : int = get_size_dict(__a , param_name='crop_size')

        if not valid_images(__a):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        __snake_case : Any = make_batched(__a)

        __snake_case : List[Any] = [
            [
                self._preprocess_image(
                    image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
                for img in video
            ]
            for video in videos
        ]

        __snake_case : str = {'pixel_values': videos}
        return BatchFeature(data=__a , tensor_type=__a)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a_(UpperCamelCase_):
    """Configuration for ViT-MSN models (ViT encoder pretrained with MSN).

    Stores the standard ViT hyperparameters; extra kwargs are forwarded to
    the PretrainedConfig base class (``UpperCamelCase_`` in this excerpt).

    NOTE(review): restored from a mangled block — the original declared
    duplicate ``__a`` parameters (a SyntaxError) and bound every value to a
    throwaway local instead of instance attributes; ``model_type`` was
    flattened to ``_snake_case``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Persist every hyperparameter on the instance so the model and
        # (de)serialization machinery can read them back.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a_(UpperCamelCase_):
    """Configuration for ViT-MSN models (ViT encoder pretrained with MSN).

    Stores the standard ViT hyperparameters; extra kwargs are forwarded to
    the PretrainedConfig base class (``UpperCamelCase_`` in this excerpt).

    NOTE(review): restored from a mangled block — the original declared
    duplicate ``__a`` parameters (a SyntaxError) and bound every value to a
    throwaway local instead of instance attributes; ``model_type`` was
    flattened to ``_snake_case``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Persist every hyperparameter on the instance so the model and
        # (de)serialization machinery can read them back.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__snake_case : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) )
return round(A , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 61 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE(method):
    """Decorate *method* so that, when accelerate >= 0.17.0 installed its
    CPU-offload hook on the instance, the hook's ``pre_forward`` runs before
    the method body (e.g. to move offloaded weights back to the device).

    Returns *method* unchanged when accelerate is missing or older than
    0.17.0.

    NOTE(review): the mangled original named the parameter ``A`` while the
    body referenced an undefined ``method``, and declared
    ``def wrapper(self, *A, **A)`` — duplicate argument names, a SyntaxError;
    both are fixed here, and the parameter name matches what the body used.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    import functools

    # functools.wraps preserves the wrapped method's name/docstring so the
    # decorated method still introspects correctly.
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery: map submodule name -> public symbols it provides.
# Fix: the obfuscated original bound every piece to the same name `__A`
# (each assignment overwriting the last) while the `_LazyModule(...)` call at
# the bottom referenced `_import_structure`, which was never defined, and the
# lazy module was never installed into `sys.modules`.
_import_structure = {
    'configuration_distilbert': [
        'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'DistilBertConfig',
        'DistilBertOnnxConfig',
    ],
    'tokenization_distilbert': ['DistilBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_distilbert'] = [
        'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DistilBertForMaskedLM',
        'DistilBertForMultipleChoice',
        'DistilBertForQuestionAnswering',
        'DistilBertForSequenceClassification',
        'DistilBertForTokenClassification',
        'DistilBertModel',
        'DistilBertPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_distilbert'] = [
        'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDistilBertForMaskedLM',
        'TFDistilBertForMultipleChoice',
        'TFDistilBertForQuestionAnswering',
        'TFDistilBertForSequenceClassification',
        'TFDistilBertForTokenClassification',
        'TFDistilBertMainLayer',
        'TFDistilBertModel',
        'TFDistilBertPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_distilbert'] = [
        'FlaxDistilBertForMaskedLM',
        'FlaxDistilBertForMultipleChoice',
        'FlaxDistilBertForQuestionAnswering',
        'FlaxDistilBertForSequenceClassification',
        'FlaxDistilBertForTokenClassification',
        'FlaxDistilBertModel',
        'FlaxDistilBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
class a_ :
    """Fenwick-style tree apparently intended for point update / range-max query.

    NOTE(review): the obfuscation pass left this class non-runnable — several
    parameter lists repeat the name ``__a`` (a SyntaxError), all four methods
    share the same name (later defs shadow earlier ones, so ``self.get_prev`` /
    ``self.get_next`` below resolve to nothing), and many assignment targets were
    replaced by the throwaway local ``__snake_case``. Each spot is flagged below;
    reconstruct against the original before relying on any behavior.
    """

    def __init__(self , __a) -> None:
        """Create a tree over a number of slots (one value array, one max tree)."""
        # NOTE(review): bodies read `size`, but the parameter is named `__a`;
        # the three results are bound to a local instead of self.size/arr/tree.
        __snake_case : Optional[int] = size
        __snake_case : Dict = [0] * size
        __snake_case : str = [0] * size

    @staticmethod
    def SCREAMING_SNAKE_CASE__ (__a) -> int:
        """Next index to the right whose node covers this position (get_next)."""
        # NOTE(review): reads `index`, but the parameter is named `__a`.
        return index | (index + 1)

    @staticmethod
    def SCREAMING_SNAKE_CASE__ (__a) -> int:
        """One before the left border of the range this node covers (get_prev)."""
        return (index & (index + 1)) - 1

    def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None:
        """Point update: set position `index` to `value` and refresh maxima."""
        # NOTE(review): duplicated `__a` parameters; every `__snake_case` below
        # discards what should be written into self.arr[...] / self.tree[...].
        __snake_case : str = value
        while index < self.size:
            __snake_case : Any = self.get_prev(__a) + 1
            if current_left_border == index:
                __snake_case : Any = value
            else:
                __snake_case : List[Any] = max(__a , __a , __a)
            __snake_case : List[str] = self.get_next(__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> int:
        """Range max over [left, right); walks covered ranges right-to-left."""
        right -= 1  # Because of right is exclusive
        __snake_case : int = 0
        while left <= right:
            __snake_case : Dict = self.get_prev(__a)
            if left <= current_left:
                # Whole covered range [current_left+1, right] fits — take tree max.
                __snake_case : List[str] = max(__a , self.tree[right])
                __snake_case : Any = current_left
            else:
                # Partial overlap — take the single element and step left.
                __snake_case : Any = max(__a , self.arr[right])
                right -= 1
        return result
# Run this module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was bound to the throwaway name `__A` while the next line read
# the undefined `git_repo_path`.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str:
    """Register the shared diffusers command-line options on pytest's parser."""
    from diffusers.utils.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(A )
def _SCREAMING_SNAKE_CASE ( terminalreporter ):
    """pytest terminal-summary hook body: write report files when the session
    was started with ``--make-reports=<id>``.

    Fix: the obfuscated original named the parameter ``A`` while the body read
    the undefined ``terminalreporter``; the option value was bound to a throwaway
    local while ``make_reports`` stayed undefined; and the report id passed on
    was the reporter object instead of the option value.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , ToolTesterMixin ):
    # NOTE(review): the three methods below all carry the same obfuscated name,
    # so earlier definitions are shadowed and only the last survives on the
    # class; the originals were presumably a setup hook plus two distinct test
    # methods — confirm and restore the real names.

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Load the text-to-speech tool and run its setup hook."""
        # NOTE(review): the loaded tool is bound to a throwaway local, yet the
        # next line reads `self.tool` — the attribute is never set here; confirm.
        __snake_case : List[str] = load_tool('text-to-speech')
        self.tool.setup()

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Check the tool's waveform for 'hey' against fixed expected samples."""
        # NOTE(review): `result` / `resulting_tensor` are read below, but the
        # assignments above bind `__snake_case` instead — confirm the originals.
        torch.manual_seed(0)
        __snake_case : Dict = self.tool('hey')
        __snake_case : List[Any] = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Repeat the waveform check (byte-for-byte duplicate of the test above)."""
        torch.manual_seed(0)
        __snake_case : Any = self.tool('hey')
        __snake_case : Any = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery: map submodule name -> public symbols it provides.
# Fix: the obfuscated original bound both the dict and the later list to the
# same name `__A` (the second assignment overwrote the first) while the
# `_LazyModule(...)` call at the bottom referenced the undefined
# `_import_structure`, and the lazy module was never installed in `sys.modules`.
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery: map submodule name -> public symbols it provides.
# Fix: the obfuscated original bound every piece to the same name `__A`
# (each assignment overwriting the last) while the `_LazyModule(...)` call at
# the bottom referenced the undefined `_import_structure`, and the lazy module
# was never installed into `sys.modules`.
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE ( method ):
    """Decorate *method* so that the module's accelerate ``_hf_hook`` pre-forward
    hook runs before the call (no-op on accelerate < 0.17.0 or when accelerate
    is unavailable).

    Fix: the obfuscated original named the parameter ``A`` while the body read
    ``method`` (NameError), and parsed a version string out of the method object
    instead of the installed accelerate version.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    # Hooks only exist from accelerate 0.17.0 onwards; older versions pass through.
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
# Module logger (obfuscated name; not referenced elsewhere in this chunk).
__A = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
__snake_case : Any = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__snake_case : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE ( A : int , A : Dict , A : Optional[Any]=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case : Optional[int] = ''
else:
__snake_case : Dict = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : int = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__snake_case : List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : int = in_proj_weight[
: config.hidden_size, :
]
__snake_case : Dict = in_proj_bias[: config.hidden_size]
__snake_case : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Any = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : Optional[Any] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(A , A )
def _SCREAMING_SNAKE_CASE ( A : Any , A : Union[str, Any] , A : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = dct.pop(A )
__snake_case : str = val
def _SCREAMING_SNAKE_CASE ( ):
    """Download the standard COCO sanity-check image (two cats on a couch).

    Fix: the obfuscated original passed the URL as the ``stream=`` argument;
    it should be ``stream=True`` so the raw response body can be handed to PIL.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm ViT-hybrid checkpoint to the HF ViTHybrid format, verify
    its outputs against the timm model, and optionally save / push the result.

    Fix: the obfuscated original declared all three parameters as ``A`` (a
    SyntaxError) and bound nearly every intermediate to a throwaway local while
    the rest of the body read the real names (``base_model``, ``rename_keys``,
    ``model``, ``outputs`` ...); those names are restored from the body's own
    references. NOTE(review): the helper names called below
    (``create_rename_keys``, ``rename_key``, ``read_in_q_k_v``, ``prepare_img``,
    ``remove_classification_head_``) match the obfuscated body as given —
    confirm they resolve in the final module.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load the ImageNet-1k id -> label mapping for the classification head
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor mirroring the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('Predicted class:' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were bound to the throwaway
    # name `__A` while the following lines read the undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    args = parser.parse_args()
    # NOTE(review): `convert_vit_checkpoint` matches the original script's entry
    # point name — confirm it resolves in the final module.
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , ToolTesterMixin ):
    # NOTE(review): the three methods below all carry the same obfuscated name,
    # so earlier definitions are shadowed and only the last survives on the
    # class; the originals were presumably a setup hook plus two distinct test
    # methods — confirm and restore the real names.

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Load the text-to-speech tool and run its setup hook."""
        # NOTE(review): the loaded tool is bound to a throwaway local, yet the
        # next line reads `self.tool` — the attribute is never set here; confirm.
        __snake_case : List[str] = load_tool('text-to-speech')
        self.tool.setup()

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Check the tool's waveform for 'hey' against fixed expected samples."""
        # NOTE(review): `result` / `resulting_tensor` are read below, but the
        # assignments above bind `__snake_case` instead — confirm the originals.
        torch.manual_seed(0)
        __snake_case : Dict = self.tool('hey')
        __snake_case : List[Any] = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Repeat the waveform check (byte-for-byte duplicate of the test above)."""
        torch.manual_seed(0)
        __snake_case : Any = self.tool('hey')
        __snake_case : Any = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
'''simple docstring'''
class a_ :
    """Binary-search-tree node used by the tree sort below.

    Fix: the obfuscated original assigned the constructor arguments to a
    throwaway local (so instances never had ``val``/``left``/``right``), and
    the insert method referenced the undefined names ``Node`` and ``insert``
    where this class and its own method are meant.
    """

    def __init__(self , __a) -> None:
        self.val = __a
        self.left = None
        self.right = None

    def SCREAMING_SNAKE_CASE__ (self , __a) -> None:
        """Insert value ``__a`` into the subtree rooted at this node."""
        if self.val:
            if __a < self.val:
                if self.left is None:
                    self.left = a_(__a)
                else:
                    self.left.SCREAMING_SNAKE_CASE__(__a)
            elif __a > self.val:
                if self.right is None:
                    self.right = a_(__a)
                else:
                    self.right.SCREAMING_SNAKE_CASE__(__a)
        else:
            # Node held a falsy value (e.g. 0): just take the new value.
            self.val = __a
def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : str ) -> Optional[int]:
"""simple docstring"""
# Recursive traversal
if root:
inorder(root.left , A )
res.append(root.val )
inorder(root.right , A )
def _SCREAMING_SNAKE_CASE ( A ):
    """Tree sort: insert every element of *A* into a BST and read it back
    in order; returns a new sorted list (or *A* itself when empty).

    Fix: the obfuscated original bound the root and the result list to a
    throwaway local and called the undefined names ``Node``/``inorder``;
    the node class is ``a_`` (with its insert method ``SCREAMING_SNAKE_CASE__``)
    and the traversal is inlined as a private helper so the function is
    self-contained.
    """
    # Build BST
    if len(A ) == 0:
        return A
    root = a_(A[0] )
    for i in range(1 , len(A ) ):
        root.SCREAMING_SNAKE_CASE__(A[i] )

    # Traverse BST in order.
    def _walk(node , acc ):
        if node:
            _walk(node.left , acc )
            acc.append(node.val )
            _walk(node.right , acc )

    res = []
    _walk(root , res )
    return res
if __name__ == "__main__":
    # Fix: the original called the undefined name `tree_sort`; the sort entry
    # point defined above is the last binding of `_SCREAMING_SNAKE_CASE`.
    print(_SCREAMING_SNAKE_CASE([1_0, 1, 3, 2, 9, 1_4, 1_3]))
'''simple docstring'''
import math
class a_ :
    """All-pairs shortest paths via Floyd–Warshall on a dense distance matrix.

    Fix: the obfuscated original declared all three methods with the same name
    and with duplicated ``__a`` parameters (a SyntaxError), and bound every
    result to a throwaway local; the method names ``add_edge`` /
    ``floyd_warshall`` / ``show_min`` are restored from the call sites in the
    ``__main__`` block below.
    """

    def __init__(self , n=0) -> None:  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self , u , v , w) -> None:
        """Record a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        """Relax every pair through every intermediate node k (O(n^3))."""
        for k in range(0 , self.n):
            for i in range(0 , self.n):
                for j in range(0 , self.n):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])

    def show_min(self , u , v):
        """Return the best known distance from u to v."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Fix: the original bound the instance to the throwaway name `__A` and
    # constructed the undefined class `Graph`; the graph class above is `a_`.
    graph = a_(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
# Fix: the logger was bound to the throwaway name `__A` while the conversion
# helper below calls `logger.info(...)`.
logger = logging.get_logger('transformers.models.encodec')
# Key-rename tables: original Encodec checkpoint names -> HF module names.
# Fix: every dict below was bound to the same throwaway name `__A`, while the
# two merged tables at the bottom referenced MAPPING_QUANTIZER / MAPPING_ENCODER
# / MAPPING_ENCODER_48K / MAPPING_DECODER / MAPPING_DECODER_48K — all of which
# were undefined. The names are restored from those references; the merged
# tables and the two key lists get their conventional names.
MAPPING_QUANTIZER = {
    'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
    'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
    'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
    'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
    'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
    'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
    'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
    'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
    'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
    'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
    'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
    'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
    'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
    'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
    'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
    'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
    'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
    'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
    'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
    'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
    'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
    'encoder.model.13.lstm': 'encoder.layers.13.lstm',
    'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
    'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
    'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
    'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
    'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
    'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
    'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
    'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
    'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
    'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
    'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
    'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
    'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
    'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
    'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
    'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
    'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
    'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
    'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
    'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
    'decoder.model.1.lstm': 'decoder.layers.1.lstm',
    'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
    'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
    'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
    'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
    'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
    'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
    'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
    'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
    'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
    'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
    'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
    'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
    'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
    'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
    'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
    'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
    'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
    'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
    'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
    'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
    'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
    'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
    'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
    'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
    'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
    'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
    'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
    'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
    'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
    'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
    'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
    'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
    'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
    'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
    'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
# 24 kHz checkpoints have no per-conv norm layers; 48 kHz checkpoints do.
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy tensor `value` into the submodule of `hf_pointer` addressed by `key`.

    NOTE(review): reconstructed from a machine-mangled original — the generated
    code declared five duplicate parameters named `A` (a SyntaxError) and every
    branch discarded the write into the model. The name `set_recursively` and
    the argument order are taken from the call in recursively_load_weights.

    Args:
        hf_pointer: root module; `key` ("a.b.c") is walked attribute by attribute.
        key: dotted path to the target submodule/parameter.
        value: tensor to copy in; its shape must match the destination.
        full_name: original checkpoint key, used only for log/error messages.
        weight_type: attribute of the target holding the tensor ("weight",
            "bias", LSTM suffixes, ...) or None when the target itself is the
            parameter.

    Raises:
        ValueError: if the destination shape does not match `value.shape`.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    # Resolve the destination shape for the sanity check below.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )

    # All named weight types live as sub-attributes of the resolved module;
    # write into their .data in place (same set handled by the caller).
    known_weight_types = (
        "weight", "weight_g", "weight_v", "bias",
        "running_mean", "running_var", "num_batches_tracked",
        "weight_ih_l0", "weight_hh_l0", "bias_ih_l0", "bias_hh_l0",
        "weight_ih_l1", "weight_hh_l1", "bias_ih_l1", "bias_hh_l1",
    )
    if weight_type in known_weight_types:
        getattr(hf_pointer, weight_type).data = value
    else:
        # weight_type is None (or unrecognized): hf_pointer itself is the tensor.
        hf_pointer.data = value

    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def should_ignore(name, ignore_keys):
    """Return True if checkpoint key `name` matches any pattern in `ignore_keys`.

    Three pattern forms are supported:
      * "prefix.*"        -> `name` starts with "prefix."
      * "prefix.*.suffix" -> both "prefix" and "suffix" occur in `name`
      * plain string      -> it occurs anywhere in `name`

    NOTE(review): reconstructed from a machine-mangled original — the generated
    code declared two duplicate parameters named `A` (a SyntaxError) and left
    `prefix`/`suffix` unbound. The name `should_ignore` and the parameter order
    are taken from the call in recursively_load_weights.
    """
    for key in ignore_keys:
        if key.endswith('.*'):
            # key[:-1] keeps the trailing dot, so "encoder.*" means "encoder."
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor in an original EnCodec state dict onto `hf_model`.

    NOTE(review): reconstructed from a machine-mangled original — the generated
    code declared three duplicate parameters named `A` (a SyntaxError) and
    discarded the `is_used`/`unused_weights` bookkeeping. The name and argument
    order are taken from the call in convert_checkpoint.

    Args:
        orig_dict: state dict of the original (pre-conversion) checkpoint.
        hf_model: the HF EncodecModel instance receiving the weights.
        model_name: "encodec_24khz", "encodec_32khz" or "encodec_48khz".

    Raises:
        ValueError: for any other `model_name`.
    """
    unused_weights = []
    # BUGFIX: the generated code tested `model_name == "encodec_24khz" or
    # "encodec_32khz"`, which is always truthy (the 48 kHz branch was
    # unreachable); use a proper membership test.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""" )

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""" )
            continue

        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue

                is_used = True
                if "*" in mapped_key:
                    # Splice the layer index from the original key into the HF key.
                    layer_index = name.split(key)[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index)

                # Order matters: the LSTM and norm-layer suffixes must be
                # matched before the generic "bias"/"weight" fallbacks.
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint into the HF format and save it.

    NOTE(review): reconstructed from a machine-mangled original — the generated
    code declared duplicate parameters named `A` (a SyntaxError) and discarded
    every `config.*` assignment target. Config attribute names are restored
    from the EncodecConfig API (the feature-extractor kwargs below ground
    `audio_channels`, `sampling_rate`, `chunk_length_s` and `overlap`); all
    values are kept verbatim.

    Args:
        model_name: "encodec_24khz", "encodec_32khz" or "encodec_48khz".
        checkpoint_path: path to the original torch checkpoint.
        pytorch_dump_folder_path: output directory for model + feature extractor.
        config_path: optional path to an existing HF config.json.
        repo_id: optional hub repo to push the converted artifacts to.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""" )

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__A = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 61 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class a_ ( BaseImageProcessor ):
    r"""ConvNeXT-style image processor: shortest-edge resize (with an optional
    crop-percentage center crop below 384 px), rescale and normalize.

    NOTE(review): reconstructed from a machine-mangled original — the duplicate
    `__a` parameters were a SyntaxError, every `self.*` assignment target had
    been discarded, and the base class `UpperCamelCase_` was undefined.
    `BaseImageProcessor` is taken from this module's imports; the method names
    are restored from the `self.resize`/`self.rescale`/`self.normalize` calls
    in `preprocess`. All default values are kept verbatim.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        crop_pct = None,
        resample = PILImageResampling.BILINEAR,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration on the instance."""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        crop_pct,
        resample = PILImageResampling.BICUBIC,
        data_format = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`. Below 384 px the shortest edge is first scaled by
        1/crop_pct and the result center-cropped to a square; at 384 px or
        larger the image is warped directly to (shortest_edge, shortest_edge).
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            # `resize` here is the module-level function from image_transforms.
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        crop_pct = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/rescale/normalize pipeline to one image
        or a batch and return a BatchFeature holding "pixel_values"."""
        # Per-call overrides fall back to the instance-level defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the ResNet subpackage.
# BUGFIX(review): the generated code bound this dict (and every backend list)
# to `__A`, yet `_LazyModule` below reads `_import_structure`; names restored
# to the standard transformers __init__ pattern.
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}

# Register the torch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_resnet'''] = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]

# Register the TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]

# Register the Flax models only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy loader so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! (the factorial of a non-negative integer).

    Memoized with lru_cache, so repeated calls are O(1) after the first.

    Raises:
        ValueError: if `num` is negative.
    """
    if num < 0:
        raise ValueError('Number should not be negative.' )
    # BUGFIX: the generated code named this function `_SCREAMING_SNAKE_CASE`
    # while recursing via `factorial`, a NameError for num >= 2; the def is
    # renamed to match its own recursive call.
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 61 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map of pretrained checkpoint name -> hosted config.json URL.
# NOTE(review): the generated name `__A` is preserved in this doc-only pass;
# upstream convention names this ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP —
# confirm before renaming.
__A = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a_ ( PretrainedConfig ):
    """Configuration class for the Ernie-M model.

    NOTE(review): reconstructed from a machine-mangled original — the duplicate
    `__a` parameters were a SyntaxError, both class attributes shared the name
    `_snake_case`, every `self.*` assignment target had been discarded, and the
    base class `UpperCamelCase_` was undefined. `PretrainedConfig` is taken
    from this module's imports; parameter names/order are restored from the
    assigned values. All default values are kept verbatim.
    """

    # Model identifier used for auto-class lookup.
    model_type = "ernie_m"
    # Maps legacy attribute names onto the canonical config attributes.
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size = 250_002,
        hidden_size = 768,
        num_hidden_layers = 12,
        num_attention_heads = 12,
        intermediate_size = 3_072,
        hidden_act = "gelu",
        hidden_dropout_prob = 0.1,
        attention_probs_dropout_prob = 0.1,
        max_position_embeddings = 514,
        initializer_range = 0.02,
        pad_token_id = 1,
        layer_norm_eps = 1E-05,
        classifier_dropout = None,
        is_decoder = False,
        act_dropout = 0.0,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for the VQModel autoencoder.

    NOTE(review): machine-generated rewrite artifacts are present and left
    byte-identical in this doc-only pass: both class attributes share the
    name `_snake_case` (the second clobbers the first), every method shares
    the name `SCREAMING_SNAKE_CASE__` (later defs shadow earlier ones, so
    only the last is reachable), and several locals (`batch_size`,
    `num_channels`, `sizes`, `image`, `init_dict`, `inputs_dict`,
    `loading_info`, `output`) are read without being bound. Confirm against
    the upstream diffusers test module before relying on behavior.
    """
    # Presumably upstream `model_class = VQModel` and `main_input_name = "sample"`.
    _snake_case = VQModel
    _snake_case = """sample"""
    # Builds a random (4, 3, 32, 32) input batch for the model under test.
    @property
    def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str:
        """simple docstring"""
        __snake_case : Dict = 4
        __snake_case : Optional[int] = 3
        __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a)
        return {"sample": image}
    # Expected model input shape (channels, height, width).
    @property
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        return (3, 3_2, 3_2)
    # Expected model output shape (channels, height, width).
    @property
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        return (3, 3_2, 3_2)
    # Provides the constructor kwargs and a dummy input for the common tests.
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        __snake_case : Optional[Any] = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        __snake_case : List[Any] = self.dummy_input
        return init_dict, inputs_dict
    # Intentionally skipped test (no-op).
    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """simple docstring"""
        pass
    # Intentionally skipped test (no-op).
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """simple docstring"""
        pass
    # Loads a tiny hub checkpoint and checks it reports no missing keys.
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a)
        self.assertIsNotNone(__a)
        self.assertEqual(len(loading_info['missing_keys']) , 0)
        model.to(__a)
        __snake_case : Any = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    # Regression test: compares a deterministic forward pass to known values.
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy')
        model.to(__a).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
        __snake_case : Optional[int] = image.to(__a)
        with torch.no_grad():
            __snake_case : List[Any] = model(__a).sample
        __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143])
        # fmt: on
        self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for the LED tokenizer (slow and fast variants).

    NOTE(review): machine-generated rewrite artifacts are present and left
    byte-identical in this doc-only pass: the three class attributes share
    the name `_snake_case` (upstream: `tokenizer_class`,
    `rust_tokenizer_class`, `test_rust_tokenizer`), many methods share the
    name `SCREAMING_SNAKE_CASE__` (later defs shadow earlier ones), and
    several names (`__a`, `vocab_tokens`, `batch`, `targets`, `inputs`,
    `input_ids`, `labels`, `encoded_output`, `outputs`, `tokens_r`,
    `tokens_p`) are read without being bound. Confirm against the upstream
    transformers test module before relying on behavior.
    """
    _snake_case = LEDTokenizer
    _snake_case = LEDTokenizerFast
    _snake_case = True
    # Writes a tiny BPE vocab/merges pair into a temp dir for the tests below.
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        super().setUp()
        __snake_case : Optional[int] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        __snake_case : Any = dict(zip(__a , range(len(__a))))
        __snake_case : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __snake_case : int = {'unk_token': '<unk>'}
        __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(__a) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(__a))
    # Instantiates the slow tokenizer from the temp dir built in setUp.
    def SCREAMING_SNAKE_CASE__ (self , **__a) -> Union[str, Any]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a)
    # Instantiates the fast (Rust) tokenizer from the same temp dir.
    def SCREAMING_SNAKE_CASE__ (self , **__a) -> Optional[int]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a)
    # Input/output text pair used by the shared tokenizer test harness.
    def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]:
        """simple docstring"""
        return "lower newer", "lower newer"
    # Full pretrained slow tokenizer (network-backed, cached).
    @cached_property
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        return LEDTokenizer.from_pretrained('allenai/led-base-16384')
    # Full pretrained fast tokenizer (network-backed, cached).
    @cached_property
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
    # Checks batch encoding produces the expected ids and tensor shapes.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """simple docstring"""
        __snake_case : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        __snake_case : int = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : Any = tokenizer(__a , max_length=len(__a) , padding=__a , return_tensors='pt')
            self.assertIsInstance(__a , __a)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            __snake_case : List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(__a , __a)
    # Encoding without targets must not emit label/decoder fields.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """simple docstring"""
        __snake_case : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : Tuple = tokenizer(__a , padding=__a , return_tensors='pt')
            self.assertIn('input_ids' , __a)
            self.assertIn('attention_mask' , __a)
            self.assertNotIn('labels' , __a)
            self.assertNotIn('decoder_attention_mask' , __a)
    # Target texts padded to max_length must produce fixed-width ids.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """simple docstring"""
        __snake_case : str = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : Any = tokenizer(text_target=__a , max_length=3_2 , padding='max_length' , return_tensors='pt')
            self.assertEqual(3_2 , targets['input_ids'].shape[1])
    # Very long inputs should be truncated to the model's max length.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : Optional[Any] = tokenizer(
                ['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=__a , truncation=__a , return_tensors='pt')
            self.assertIsInstance(__a , __a)
            self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2))
    # Both inputs and targets must be wrapped in BOS/EOS special tokens.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        __snake_case : int = ['A long paragraph for summarization.']
        __snake_case : List[str] = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : str = tokenizer(__a , return_tensors='pt')
            __snake_case : Union[str, Any] = tokenizer(text_target=__a , return_tensors='pt')
            __snake_case : Any = inputs['input_ids']
            __snake_case : List[Any] = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    # Padding must also pad the LED-specific global_attention_mask.
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case : List[Any] = ['Summary of the text.', 'Another summary.']
            __snake_case : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __snake_case : List[str] = tokenizer(__a , padding=__a)
            __snake_case : Optional[Any] = [[0] * len(__a) for x in encoded_output['input_ids']]
            __snake_case : Dict = tokenizer.pad(__a)
            self.assertSequenceEqual(outputs['global_attention_mask'] , __a)
    # Intentionally skipped test (no-op).
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        pass
    # Slow and fast tokenizers must agree on special-token handling.
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a)
                __snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a)
                __snake_case : List[str] = 'A, <mask> AllenNLP sentence.'
                __snake_case : int = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a)
                __snake_case : Optional[int] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a)
                self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
                __snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                __snake_case : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(
                    __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune.

    NOTE(review): reconstructed from a machine-mangled original — every field
    was bound to the same name `_snake_case` (so only one survived) and the
    optional defaults were the undefined name `UpperCamelCase_`. The class
    name is restored from `HfArgumentParser((ModelArguments, ...))` in main();
    field names/types follow the standard run_ner argument layout. Help texts
    are kept verbatim.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): reconstructed from a machine-mangled original — every field
    was bound to the same name `_snake_case` and the optional defaults were
    the undefined name `UpperCamelCase_`. The class name is restored from
    `HfArgumentParser((..., DataTrainingArguments, ...))` in main(); field
    names/types follow the standard run_ner argument layout. Help texts are
    kept verbatim.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
__snake_case : List[str] = import_module('tasks' )
try:
__snake_case : Any = getattr(A , model_args.task_type )
__snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels )
__snake_case : Dict[int, str] = dict(enumerate(A ) )
__snake_case : Optional[Any] = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
__snake_case : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
__snake_case : List[Any] = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__snake_case : int = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]:
__snake_case : str = np.argmax(A , axis=2 )
__snake_case ,__snake_case : int = preds.shape
__snake_case : Dict = [[] for _ in range(A )]
__snake_case : Union[str, Any] = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A : EvalPrediction ) -> Dict:
__snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
__snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__snake_case : Optional[Any] = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : List[str] = trainer.evaluate()
__snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
__snake_case : str = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__snake_case ,__snake_case ,__snake_case : str = trainer.predict(A )
__snake_case ,__snake_case : List[str] = align_predictions(A , A )
__snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def _SCREAMING_SNAKE_CASE ( A : int ) -> Any:
    """Entry point used by xla_spawn (TPUs).

    The argument (presumably the spawned process index — confirm against the
    xla_spawn launcher) is ignored; the regular ``main()`` is invoked.
    """
    main()
if __name__ == "__main__":
    # Standard CLI entry point when the script is executed directly.
    main()
'''simple docstring'''
import cva
import numpy as np
class a_ :
    """Harris corner detector.

    Parameters
    ----------
    k : float
        Harris free response parameter; only the empirically recommended
        values 0.04 and 0.06 are accepted.
    window_size : int
        Side length of the neighbourhood summed around each pixel.

    NOTE(review): the obfuscated source had duplicate ``__a`` parameters and
    lost all attribute/local bindings; this body restores them from the code's
    own right-hand-side references (self.k, img, ixx, corner_list, ...).
    """

    def __init__(self, k: float, window_size: int) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        # Represent the detector by its k parameter.
        return str(self.k)

    def SCREAMING_SNAKE_CASE__(self, img_path: str) -> "tuple[cva.Mat, list[list[int]]]":
        """Detect Harris corners in the greyscale image at ``img_path``.

        Returns the image converted to RGB with detected corners painted
        blue-channel 255, plus a list of ``[x, y, response]`` entries.
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # BUGFIX: use the configured self.k instead of a hard-coded 0.04.
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 2_5_5)
        return color_img, corner_list
if __name__ == "__main__":
    # The detector class in this file is named `a_` and its detect method is
    # `SCREAMING_SNAKE_CASE__`; the original guard referenced the undefined
    # names `HarrisCorner` / `edge_detect.detect`.
    edge_detect = a_(0.04, 3)
    color_img, corner_list = edge_detect.SCREAMING_SNAKE_CASE__('''path_to_image''')
    cva.imwrite('''detect.png''', color_img)
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : list ) -> list:
"""simple docstring"""
__snake_case : Tuple = False
while is_sorted is False: # Until all the indices are traversed keep looping
__snake_case : Optional[Any] = True
for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__snake_case ,__snake_case : int = input_list[i + 1], input_list[i]
# swapping if elements not in order
__snake_case : List[Any] = False
for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
__snake_case : Any = False
return input_list
if __name__ == "__main__":
    print('''Enter list to be sorted''')
    # Read one line of whitespace-separated integers.
    __A = [int(x) for x in input().split()]
    # At this point _SCREAMING_SNAKE_CASE is the odd-even sort defined above.
    __A = _SCREAMING_SNAKE_CASE(__A)
    print('''The sorted list is''')
    print(__A)
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( A : list , A : int | None = None , A : int | None = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Union[str, Any] = 0
if end is None:
__snake_case : int = len(A ) - 1
if start >= end:
return
__snake_case : Tuple = (start + end) // 2
slowsort(A , A , A )
slowsort(A , mid + 1 , A )
if sequence[end] < sequence[mid]:
__snake_case ,__snake_case : Dict = sequence[mid], sequence[end]
slowsort(A , A , end - 1 )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod

    testmod()
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def _SCREAMING_SNAKE_CASE(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) -> Dict:
    """Convert one timm LeViT checkpoint into a HF LevitForImageClassificationWithTeacher.

    Parameters reconstructed from the body's own references (the obfuscated
    source declared five duplicate ``A`` parameters):
    hidden_sizes: first hidden size, selects which timm variant to load.
    name: checkpoint name, e.g. "levit-128S"; trailing "S" picks levit_128s.
    config: target HF config for the converted model.
    save_directory: directory the converted weights are written under.
    push_to_hub: when True, save model + image processor under the checkpoint name.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy weights positionally: the two state dicts are assumed to list
        # tensors in the same order (NOTE(review): confirm ordering holds).
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


# Backward-compatible alias matching the call sites elsewhere in this file.
convert_weight_and_push = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(save_directory: Path, model_name: str = None, push_to_hub: bool = True) -> List[Any]:
    """Build the LeViT configs and convert one (or every) checkpoint.

    save_directory: output directory passed through to the converter.
    model_name: a key of ``names_to_config``; when None, all variants convert.
    push_to_hub: forwarded to the per-checkpoint converter.
    Returns (config, expected_shape) of the last converted model.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Pre-bind the shared ImageNet classification fields into a config factory.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }
    if model_name:
        # Bind `config` here too so the final return never sees it unbound.
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


# Backward-compatible alias matching the name used at the bottom of this file.
convert_weights_and_push = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    # At this point _SCREAMING_SNAKE_CASE is convert_weights_and_push above.
    _SCREAMING_SNAKE_CASE(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _SCREAMING_SNAKE_CASE(swin_name: str) -> SwinConfig:
    """Build a SwinConfig from a timm checkpoint name.

    The name encodes the variant, e.g. "swin_tiny_patch4_window7_224":
    index 1 is the size ("tiny"/"small"/"base"/large), index 3's last char is
    the window size, index 4 is the image size.
    """
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    # Attach the ImageNet-1k label mapping.
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config


# Backward-compatible alias matching the call site in the checkpoint converter.
get_swin_config = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(name: str) -> str:
    """Map a timm Swin state-dict key to its HF transformers equivalent.

    Keys under the classification head keep no prefix; every other key gets a
    "swin." prefix after the segment-by-segment renames below.
    """
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name


# Backward-compatible alias for use by the state-dict converter below.
rename_key = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__snake_case : str = orig_state_dict.pop(A )
if "mask" in key:
continue
elif "qkv" in key:
__snake_case : Union[str, Any] = key.split('.' )
__snake_case : Any = int(key_split[1] )
__snake_case : Optional[int] = int(key_split[3] )
__snake_case : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__snake_case : List[Any] = val[:dim, :]
__snake_case : Any = val[
dim : dim * 2, :
]
__snake_case : str = val[-dim:, :]
else:
__snake_case : Dict = val[
:dim
]
__snake_case : List[str] = val[
dim : dim * 2
]
__snake_case : Tuple = val[
-dim:
]
else:
__snake_case : Dict = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE(swin_name: str, pytorch_dump_folder_path: str) -> None:
    """Convert a timm Swin checkpoint to HF format and verify the logits.

    Loads the timm model, builds the matching SwinConfig, transplants the
    state dict, checks both models agree on a sample COCO image, then saves
    the model and image processor to ``pytorch_dump_folder_path``.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    # Both implementations must agree to within float tolerance.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias matching the name used by the __main__ block below.
convert_swin_checkpoint = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # At this point _SCREAMING_SNAKE_CASE is convert_swin_checkpoint above.
    _SCREAMING_SNAKE_CASE(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
    """Builds configs and dummy inputs for the TF EfficientFormer model tests.

    NOTE(review): the obfuscation collapsed every __init__ parameter into a
    duplicate ``__a`` and every assignment target into ``__snake_case``, so
    the intended attribute bindings (self.parent, self.batch_size, ...) are
    lost; restore them from the upstream transformers test file
    (tests/models/efficientformer/test_modeling_tf_efficientformer.py) before
    running. The bare ``__a`` references in method bodies are likewise
    unresolved leftovers.
    """

    def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str:
        """Record the tester's hyperparameters (bindings lost — see class note)."""
        __snake_case : Optional[Any] = parent
        __snake_case : Optional[int] = batch_size
        __snake_case : Optional[Any] = image_size
        __snake_case : Optional[int] = patch_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Optional[Any] = is_training
        __snake_case : Tuple = use_labels
        __snake_case : Optional[int] = hidden_size
        __snake_case : Any = num_hidden_layers
        __snake_case : List[str] = num_attention_heads
        __snake_case : Tuple = intermediate_size
        __snake_case : List[str] = hidden_act
        __snake_case : Dict = hidden_dropout_prob
        __snake_case : Any = attention_probs_dropout_prob
        __snake_case : Dict = type_sequence_label_size
        __snake_case : str = initializer_range
        __snake_case : int = encoder_stride
        __snake_case : List[str] = num_attention_outputs
        __snake_case : Optional[Any] = embed_dim
        __snake_case : Optional[Any] = embed_dim + 1
        __snake_case : List[str] = resolution
        __snake_case : Optional[int] = depths
        __snake_case : List[Any] = hidden_sizes
        __snake_case : List[str] = dim
        __snake_case : Union[str, Any] = mlp_expansion_ratio

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Build (config, pixel_values, labels) for a test run."""
        __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __snake_case : List[str] = None
        if self.use_labels:
            __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        __snake_case : Tuple = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Return an EfficientFormerConfig built from the stored hyperparameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]:
        """Run the base model and check the hidden-state output shape."""
        __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a)
        __snake_case : int = model(__a , training=__a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple:
        """Run image classification (RGB and greyscale) and check logits shape."""
        __snake_case : Dict = self.type_sequence_label_size
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : Optional[int] = model(__a , labels=__a , training=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __snake_case : str = model(__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        __snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs
        __snake_case : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common-test mixin instantiation for the TF EfficientFormer models.

    NOTE(review): the obfuscation collapsed the mixin class attributes
    (all_model_classes, pipeline_model_mapping, the test_* flags) into
    repeated ``_snake_case`` bindings and left bare ``__a`` /
    ``TFEfficientFormerModelTester`` references unresolved; restore from the
    upstream transformers test file before running.
    """

    _snake_case = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _snake_case = (
        {
            """feature-extraction""": TFEfficientFormerModel,
            """image-classification""": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """Set up the model tester and config tester used by the tests below."""
        # NOTE(review): TFEfficientFormerModelTester and the bare `__a`
        # arguments are unresolved names left by the obfuscation.
        __snake_case : Dict = TFEfficientFormerModelTester(self)
        __snake_case : List[Any] = ConfigTester(
            self , config_class=__a , has_text_modality=__a , hidden_size=3_7)

    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Skipped: EfficientFormer has no inputs_embeds."""
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Skipped: EfficientFormer has no input/output embeddings."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Check that every model's call signature starts with pixel_values."""
        __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Optional[int] = model_class(__a)
            __snake_case : Union[str, Any] = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Optional[int] = [*signature.parameters.keys()]
            __snake_case : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Check hidden-states outputs (count and shapes) for every model class."""
        def check_hidden_states_output(__a , __a , __a):
            __snake_case : str = model_class(__a)
            __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __snake_case : Optional[Any] = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(__a) , __a)
            if hasattr(self.model_tester , 'encoder_seq_length'):
                __snake_case : List[Any] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
                    __snake_case : str = seq_length * self.model_tester.chunk_length
            else:
                __snake_case : Optional[int] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __snake_case : List[Any] = outputs.decoder_hidden_states
                # NOTE(review): "asseretIsInstance" is a typo for assertIsInstance.
                self.asseretIsInstance(__a , (list, tuple))
                self.assertEqual(len(__a) , __a)
                __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
                __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[str] = True
            check_hidden_states_output(__a , __a , __a)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : str = True
            check_hidden_states_output(__a , __a , __a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int:
        """Drop labels for the teacher model, which takes none."""
        __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Exercise the base-model creation check."""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Skipped: masked image modeling is not implemented."""
        __snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a)

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Exercise the image-classification creation check."""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a)

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Check attention outputs (count and shapes) for every model class."""
        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Tuple = True
        __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
        __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
        __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
        __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
        if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
            __snake_case : str = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __snake_case : Optional[Any] = True
            __snake_case : Dict = False
            __snake_case : Optional[int] = True
            __snake_case : Dict = model_class(__a)
            __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __snake_case : Dict = True
            __snake_case : str = model_class(__a)
            __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Build each model from fully-flexible keras Inputs to catch shape conditionals."""
        __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __snake_case : Tuple = model_class(__a)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __snake_case : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __snake_case : Tuple = model(__a)
            self.assertTrue(outputs_dict is not None)
def _SCREAMING_SNAKE_CASE ( ) -> int:
    """Load and return the COCO sample image used by the integration tests below."""
    test_image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return test_image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
    """Slow integration tests for the pretrained efficientformer-l1-300 checkpoints.

    NOTE(review): all three members share the obfuscated name
    ``SCREAMING_SNAKE_CASE__`` (later defs shadow earlier ones), the test
    bodies reference ``self.default_image_processor`` / ``model`` /
    ``image_processor`` / ``inputs`` that the obfuscation unbound, and the
    bare ``__a`` arguments are unresolved; restore from the upstream
    transformers test file before running.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Return the checkpoint's image processor when vision deps are available."""
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Check the plain image-classification head's logits on the COCO fixture."""
        __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        __snake_case : Optional[int] = self.default_image_processor
        __snake_case : List[Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
        # forward pass
        __snake_case : List[str] = model(**__a , training=__a)
        # verify the logits
        __snake_case : str = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , __a)
        __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Check the teacher-distilled head's logits on the COCO fixture."""
        __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        __snake_case : List[Any] = self.default_image_processor
        __snake_case : Union[str, Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
        # forward pass
        __snake_case : Optional[int] = model(**__a , training=__a)
        # verify the logits
        __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , __a)
        __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class a_ ( UpperCamelCase_ ):
    """Output of the RobertaSeries model: projection state plus encoder outputs.

    Field names restored to match the keyword arguments used when this output
    is constructed in the model code below (the obfuscated source collapsed
    all four into un-annotated ``_snake_case`` bindings, which are not
    dataclass fields at all).
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


# Backward-compatible alias under the name used by the model code below.
TransformationModelOutput = a_
class a_ ( UpperCamelCase_ ):
    """XLM-Roberta-based configuration with a projection head (RobertaSeries).

    Parameter names restored from the keyword arguments the body itself uses
    (the obfuscated source declared seven duplicate ``__a`` parameters).
    """

    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=5_1_2, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs) -> Any:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Width of the projection head applied on top of the encoder.
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


# Backward-compatible alias under the name referenced by the model class below.
RobertaSeriesConfig = a_
class a_ ( UpperCamelCase_ ):
    """XLM-Roberta encoder with a linear projection head (RobertaSeries model).

    NOTE(review): the class-attribute names below were reconstructed from the
    attribute values still visible in the obfuscated source; confirm against
    the upstream ``modeling_roberta_series.py``. ``RobertaSeriesConfig`` is
    expected to be the config class defined just above in this file.
    """

    _keys_to_ignore_on_load_unexpected = [r"""pooler""", r"""logit_scale"""]
    _keys_to_ignore_on_load_missing = [r"""position_ids""", r"""predictions.decoder.bias"""]
    base_model_prefix = """roberta"""
    config_class = RobertaSeriesConfig

    def __init__(self, __a) -> Optional[int]:
        super().__init__(__a)
        config = __a
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, 'has_pre_transformation', False)
        if self.has_pre_transformation:
            # Optional second projection fed from the penultimate hidden state.
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def SCREAMING_SNAKE_CASE__(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None) -> List[str]:
        """Encode the inputs with the roberta base model and project them.

        Returns a TransformationModelOutput carrying the projection state and
        the encoder's hidden states/attentions.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The pre-transformation path needs the penultimate hidden state.
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
'''simple docstring'''
__A = {str(digit): digit**5 for digit in range(1_0)}
def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``A``.

    The original body looked digits up in a module-level table whose name the
    obfuscation destroyed; computing the power directly keeps this function
    self-contained.
    """
    return sum(int(digit) ** 5 for digit in str(A))


# `solution` below (and Project Euler convention) calls this helper by name.
digits_fifth_powers_sum = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( ) -> int:
    """Project Euler 30: sum of all numbers equal to the sum of the fifth
    powers of their digits.

    The digit-power sum is inlined because the sibling helper's name was
    destroyed by the obfuscation.  The upper bound is tightened from
    1_000_000 to 354_295: a number with d digits has digit-fifth-power sum at
    most d * 9**5, and 6 * 9**5 == 354294, so no number >= 354295 can ever
    equal its own digit-fifth-power sum.
    """
    return sum(
        number
        for number in range(10_00, 354_295)
        if number == sum(int(digit) ** 5 for digit in str(number)))


# The __main__ guard below calls this function by its canonical name.
solution = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
print(solution()) | 61 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( discount_rate : float , cash_flows : list[float] ) -> float:
    """Return the net present value of ``cash_flows`` at ``discount_rate``,
    rounded to 2 decimal places.

    Cash flow ``i`` is discounted by ``(1 + discount_rate) ** i`` (period 0 is
    undiscounted).  The original signature declared two parameters both named
    ``A`` — a SyntaxError — and returned ``round(A, 2)`` of an undefined
    name; parameter names are restored from the body's own references.

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) )
    return round(present_value , ndigits=2 )


# Restore a descriptive public name for the mangled definition.
net_present_value = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
import doctest
doctest.testmod() | 61 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a_ :
    """A binary-tree node.

    The obfuscated source declared three unannotated attributes all named
    `_snake_case`, so the dataclass had no fields and `Node(1)` (used
    throughout this module) raised TypeError.  Field names are restored from
    the attribute accesses in the traversal functions below
    (`root.data`, `root.left`, `root.right`).
    """

    data: int
    left: Node | None = None
    right: Node | None = None


# Every constructor call and annotation in this module uses the name `Node`.
Node = a_
def _SCREAMING_SNAKE_CASE ( ) -> Node | None:
    """Build the fixture tree used by main():

            1
           / \\
          2   3
         / \\
        4   5

    The obfuscated body created five nodes into throwaway locals and returned
    an undefined name; the links are restored so the five created nodes form
    the tree above.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree


# main() below calls this function by its canonical name.
make_tree = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """Return the pre-order (node, left, right) traversal of the tree.

    The parameter is renamed from the placeholder ``A`` to ``root``, which is
    the name the body already uses; the recursive call resolves through the
    module-level ``preorder`` binding below.
    """
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


preorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """Return the post-order (left, right, node) traversal of the tree."""
    # Parameter renamed from the placeholder ``A`` to ``root`` (the name the
    # body already uses); recursion resolves through the binding below.
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


postorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """Return the in-order (left, node, right) traversal of the tree."""
    # Parameter renamed from the placeholder ``A`` to ``root`` (the name the
    # body already uses); recursion resolves through the binding below.
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


inorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> int:
    """Return the height of the tree (number of nodes on the longest
    root-to-leaf path; an empty tree has height 0)."""
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0


height = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> Sequence[Node | None]:
    """Return the breadth-first (level-order) traversal of the tree.

    Restored locals: the obfuscated body assigned the queue and the dequeued
    node to throwaway names while the control flow still referenced
    ``process_queue`` and ``node``.
    """
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output


level_order = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Return the data of the nodes on ``level`` (1-based, root is level 1),
    left to right.

    The original signature declared two parameters both named ``A`` (a
    SyntaxError); names are restored from the inner helper, which survived
    intact.
    """
    output: list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output


get_nodes_from_left_to_right = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Return the data of the nodes on ``level`` (1-based), right to left.

    Mirror of ``get_nodes_from_left_to_right``: the right subtree is visited
    first.  Duplicate placeholder parameters restored as in that function.
    """
    output: list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output


get_nodes_from_right_to_left = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> Sequence[Node | None] | list[Any]:
    """Return the zig-zag level-order traversal: one list per level,
    alternating left-to-right (odd levels) and right-to-left (even levels).

    Restored locals: the obfuscated body assigned ``output``, ``flag`` and
    the tree height to throwaway names while still reading them by their
    original names.
    """
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output


zigzag = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( ) -> None:  # Main function for testing.
    """Build the fixture tree and print every traversal defined above."""
    # The obfuscated body bound the tree to a throwaway local and then
    # formatted an undefined name into every print; `tree` is restored.
    tree = make_tree()
    print(F"""In-order Traversal: {inorder(tree )}""" )
    print(F"""Pre-order Traversal: {preorder(tree )}""" )
    print(F"""Post-order Traversal: {postorder(tree )}""" , '\n' )
    print(F"""Height of Tree: {height(tree )}""" , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(tree ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(tree ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(tree ) )


# The __main__ guard below calls this function by its canonical name.
main = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 61 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( input_num : int ) -> int:
    """Return the sum of the proper divisors of ``input_num``
    (every divisor from 1 up to ``input_num // 2``).

    Fixes from the obfuscated source: the type guard read
    ``isinstance(A, A)`` — which raises TypeError for any integer argument
    instead of validating it — and the body referenced ``input_num`` while
    the parameter was named ``A``.

    Raises:
        ValueError: if the argument is not an integer or is not positive.
    """
    if not isinstance(input_num , int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )


# Restore a descriptive public name for the mangled definition.
sum_of_divisors = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
import doctest
doctest.testmod() | 61 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a_ :
def __init__(self , __a = None) -> None:
"""simple docstring"""
if components is None:
__snake_case : List[str] = []
__snake_case : Optional[int] = list(__a)
def __len__(self) -> int:
"""simple docstring"""
return len(self.__components)
def __str__(self) -> str:
"""simple docstring"""
return "(" + ",".join(map(__a , self.__components)) + ")"
def __add__(self , __a) -> Vector:
"""simple docstring"""
__snake_case : Optional[Any] = len(self)
if size == len(__a):
__snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)]
return Vector(__a)
else:
raise Exception('must have the same size')
def __sub__(self , __a) -> Vector:
"""simple docstring"""
__snake_case : Optional[Any] = len(self)
if size == len(__a):
__snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)]
return Vector(__a)
else: # error case
raise Exception('must have the same size')
@overload
def __mul__(self , __a) -> Vector:
"""simple docstring"""
...
@overload
def __mul__(self , __a) -> float:
"""simple docstring"""
...
def __mul__(self , __a) -> float | Vector:
"""simple docstring"""
if isinstance(__a , (float, int)):
__snake_case : str = [c * other for c in self.__components]
return Vector(__a)
elif isinstance(__a , __a) and len(self) == len(__a):
__snake_case : List[Any] = len(self)
__snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)]
return sum(__a)
else: # error case
raise Exception('invalid operand!')
def SCREAMING_SNAKE_CASE__ (self) -> Vector:
"""simple docstring"""
return Vector(self.__components)
def SCREAMING_SNAKE_CASE__ (self , __a) -> float:
"""simple docstring"""
if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception('index out of range')
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None:
"""simple docstring"""
assert -len(self.__components) <= pos < len(self.__components)
__snake_case : int = value
def SCREAMING_SNAKE_CASE__ (self) -> float:
"""simple docstring"""
if len(self.__components) == 0:
raise Exception('Vector is empty')
__snake_case : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(__a))
def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float:
"""simple docstring"""
__snake_case : Tuple = self * other
__snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def _SCREAMING_SNAKE_CASE ( dimension : int ) -> Vector:
    """Return a zero vector of the given dimension.

    Fixes from the obfuscated source: the guard read ``isinstance(A, A)``
    (TypeError for any int) and the body referenced ``dimension`` while the
    parameter was named ``A``.
    """
    assert isinstance(dimension , int )
    return Vector([0] * dimension )


zero_vector = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( dimension : int , pos : int ) -> Vector:
    """Return the unit basis vector of the given dimension with a 1 at
    index ``pos`` and 0 everywhere else.

    The original signature declared two parameters both named ``A`` (a
    SyntaxError) and its body assigned the list and the 1 to throwaway
    locals; both are reconstructed from the surviving structure.
    """
    assert isinstance(dimension , int ) and isinstance(pos , int )
    vector = [0] * dimension
    vector[pos] = 1
    return Vector(vector )


unit_basis_vector = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( scalar : float , x : Vector , y : Vector ) -> Vector:
    """Return ``x * scalar + y`` (the BLAS "axpy" operation).

    Parameter names are restored from the surviving return expression
    (``x * scalar + y``); the original signature declared three parameters
    all named ``A`` and its assertions read ``isinstance(A, A)``.
    """
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y


axpy = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( n : int , a : int , b : int ) -> Vector:
    """Return a vector of ``n`` random integers drawn uniformly from [a, b].

    The original signature declared three parameters all named ``A`` (a
    SyntaxError).  NOTE(review): the seed argument was mangled too —
    ``random.seed(None)`` (fresh entropy each call) is assumed here from the
    conventional form of this helper; confirm against the upstream source.
    """
    random.seed(None )
    return Vector([random.randint(a , b ) for _ in range(n )] )


random_vector = _SCREAMING_SNAKE_CASE
class a_ :
def __init__(self , __a , __a , __a) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = matrix
__snake_case : int = w
__snake_case : str = h
def __str__(self) -> str:
"""simple docstring"""
__snake_case : Dict = ''
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__(self , __a) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__snake_case : Tuple = []
for i in range(self.__height):
__snake_case : List[Any] = [
self.__matrix[i][j] + other.component(__a , __a)
for j in range(self.__width)
]
matrix.append(__a)
return Matrix(__a , self.__width , self.__height)
else:
raise Exception('matrix must have the same dimension!')
def __sub__(self , __a) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__snake_case : str = []
for i in range(self.__height):
__snake_case : List[str] = [
self.__matrix[i][j] - other.component(__a , __a)
for j in range(self.__width)
]
matrix.append(__a)
return Matrix(__a , self.__width , self.__height)
else:
raise Exception('matrices must have the same dimension!')
@overload
def __mul__(self , __a) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__(self , __a) -> Vector:
"""simple docstring"""
...
def __mul__(self , __a) -> Vector | Matrix:
"""simple docstring"""
if isinstance(__a , __a): # matrix-vector
if len(__a) == self.__width:
__snake_case : Tuple = zero_vector(self.__height)
for i in range(self.__height):
__snake_case : Union[str, Any] = [
self.__matrix[i][j] * other.component(__a)
for j in range(self.__width)
]
ans.change_component(__a , sum(__a))
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!')
elif isinstance(__a , (int, float)): # matrix-scalar
__snake_case : str = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(__a , self.__width , self.__height)
return None
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return self.__height
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return self.__width
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__snake_case : List[Any] = value
else:
raise Exception('change_component: indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
__snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__a)):
__snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__a , self.__width - 1 , self.__height - 1).determinant()
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__a , __a)
else:
raise Exception('Indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
if self.__height < 1:
raise Exception('Matrix has no element')
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__snake_case : Any = [
self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width)
]
return sum(__a)
def _SCREAMING_SNAKE_CASE ( n : int ) -> Matrix:
    """Return an ``n x n`` matrix of zeros.

    Restored from the obfuscated source, whose body mixed the placeholder
    parameter ``A`` with an undefined ``n`` and returned ``Matrix(A, A, A)``.
    """
    matrix: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(matrix , n , n )


square_zero_matrix = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( width : int , height : int , a : int , b : int ) -> Matrix:
    """Return a ``height x width`` matrix of random integers from [a, b].

    The original signature declared four parameters all named ``A`` (a
    SyntaxError); names are reconstructed from the nested comprehension
    shape.  NOTE(review): ``random.seed(None)`` is assumed, as in
    ``random_vector`` above — confirm against the upstream source.
    """
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )


random_matrix = _SCREAMING_SNAKE_CASE
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__A = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
__A = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
__A = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
__A = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
__A = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _SCREAMING_SNAKE_CASE ( k : str , patterns ) -> str:
    """Apply every ``(tf_name, hf_name)`` substitution in ``patterns`` to the
    key ``k`` and return the result.

    Fixes from the obfuscated source: the signature declared two parameters
    both named ``A`` (a SyntaxError) and each ``k.replace(...)`` result was
    assigned to a throwaway local, so the original ``k`` was returned
    unchanged and no key was ever renamed.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k


# The conversion functions below call this helper by its canonical name.
rename_state_dict_key = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( A : dict , A : dict ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
__snake_case : Tuple = BigBirdPegasusConfig(**A )
__snake_case : Dict = BigBirdPegasusForConditionalGeneration(A )
__snake_case : int = torch_model.state_dict()
__snake_case : str = {}
# separating decoder weights
__snake_case : str = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
__snake_case : str = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
__snake_case : Tuple = [k.endswith(A ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
__snake_case : str = DECODER_PATTERNS
__snake_case : Optional[int] = rename_state_dict_key(A , A )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
__snake_case : Optional[Any] = v.T
__snake_case : Dict = torch.from_numpy(A )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
__snake_case : List[str] = [k.endswith(A ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
__snake_case : List[str] = REMAINING_PATTERNS
__snake_case : List[str] = rename_state_dict_key(A , A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
__snake_case : Union[str, Any] = v.T
__snake_case : Dict = torch.from_numpy(A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
__snake_case : Optional[int] = mapping['model.embed_positions.weight']
__snake_case : Tuple = mapping.pop('model.embed_positions.weight' )
__snake_case ,__snake_case : Optional[int] = torch_model.load_state_dict(A , strict=A )
__snake_case : Optional[int] = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def _SCREAMING_SNAKE_CASE ( path : Union[str, Any] ) -> Dict:
    """Load every variable from a TensorFlow checkpoint at ``path`` into a
    plain ``{name: numpy array}`` dict, skipping bookkeeping variables.

    Restored locals: the obfuscated body assigned the variable list, the
    skip flag, the loaded array and the result dict to throwaway names while
    the control flow still read ``skip_key`` and indexed the result.
    """
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    # Optimizer/bookkeeping variables that are not model weights.
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , 'converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights


get_tf_weights_as_numpy = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( ckpt_path : str , save_dir : str , config_update : dict ) -> None:
    """Convert a BigBirdPegasus TF checkpoint to a PyTorch model and save it.

    The original signature declared three parameters all named ``A`` (a
    SyntaxError) and bound the intermediate results to throwaway locals
    while calling ``torch_model.save_pretrained``; the declared ``-> int``
    return was also wrong — nothing is returned.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )


# The argparse __main__ block below calls this function by its canonical name.
convert_bigbird_pegasus_ckpt_to_pytorch = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__A = parser.parse_args()
__A = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 61 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__A = '''main'''
# Default branch name
__A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
__A = '''aaaaaaa'''
# This commit does not exist, so we should 404.
__A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
__A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ):
    """Context manager printing an English greeting on entry, farewell on exit."""
    print('Welcome!' )
    yield
    print('Bye!' )


# The ContextManagers tests below use this helper as `context_en`, a name the
# obfuscation destroyed; restore it.
context_en = _SCREAMING_SNAKE_CASE
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ):
    """Context manager printing a French greeting on entry, farewell on exit."""
    print('Bonjour!' )
    yield
    print('Au revoir!' )


# The ContextManagers tests below use this helper as `context_fr`, a name the
# obfuscation destroyed; restore it.
context_fr = _SCREAMING_SNAKE_CASE
class a_ ( unittest.TestCase ):
    # Smoke test: the `transformers` package star-imported at the top of this
    # file is a real, importable distribution.
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Assert that `transformers` has a module spec and is discoverable."""
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class a_ ( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def SCREAMING_SNAKE_CASE__ (self , __a) -> int:
"""simple docstring"""
with ContextManagers([]):
print('Transformers are awesome!')
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]:
"""simple docstring"""
with ContextManagers([context_en()]):
print('Transformers are awesome!')
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple:
"""simple docstring"""
with ContextManagers([context_fr(), context_en()]):
print('Transformers are awesome!')
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
@require_torch
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(find_labels(__a) , ['labels'])
self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label'])
self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions'])
class a_ ( UpperCamelCase_ ):
pass
self.assertEqual(find_labels(__a) , ['labels'])
@require_tf
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
self.assertEqual(find_labels(__a) , ['labels'])
self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label'])
self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions'])
class a_ ( UpperCamelCase_ ):
pass
self.assertEqual(find_labels(__a) , ['labels'])
@require_flax
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
self.assertEqual(find_labels(__a) , [])
self.assertEqual(find_labels(__a) , [])
self.assertEqual(find_labels(__a) , [])
class a_ ( UpperCamelCase_ ):
pass
self.assertEqual(find_labels(__a) , []) | 61 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( UpperCamelCase_ , unittest.TestCase ):
_snake_case = KandinskyImgaImgPipeline
_snake_case = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
_snake_case = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_snake_case = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_snake_case = False
@property
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
return 1_0_0
@property
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
torch.manual_seed(0)
__snake_case : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__snake_case : int = MultilingualCLIP(__a)
__snake_case : Any = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
__snake_case : Optional[Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__snake_case : List[str] = UNetaDConditionModel(**__a)
return model
@property
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
torch.manual_seed(0)
__snake_case : List[Any] = VQModel(**self.dummy_movq_kwargs)
return model
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : Union[str, Any] = self.dummy_tokenizer
__snake_case : List[str] = self.dummy_unet
__snake_case : List[str] = self.dummy_movq
__snake_case : str = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__snake_case : List[str] = DDIMScheduler(**__a)
__snake_case : Any = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def SCREAMING_SNAKE_CASE__ (self , __a , __a=0) -> List[Any]:
"""simple docstring"""
__snake_case : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a)).to(__a)
__snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(__a)
# create init_image
__snake_case : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__a)).to(__a)
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
__snake_case : Optional[int] = Image.fromarray(np.uinta(__a)).convert('RGB').resize((2_5_6, 2_5_6))
if str(__a).startswith('mps'):
__snake_case : Optional[int] = torch.manual_seed(__a)
else:
__snake_case : Any = torch.Generator(device=__a).manual_seed(__a)
__snake_case : Union[str, Any] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
__snake_case : Tuple = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : Optional[int] = self.pipeline_class(**__a)
__snake_case : Tuple = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
__snake_case : List[str] = pipe(**self.get_dummy_inputs(__a))
__snake_case : List[str] = output.images
__snake_case : Union[str, Any] = pipe(
**self.get_dummy_inputs(__a) , return_dict=__a , )[0]
__snake_case : int = image[0, -3:, -3:, -1]
__snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__snake_case : List[str] = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy')
__snake_case : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
__snake_case : int = 'A red cartoon frog, 4k'
__snake_case : Tuple = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(__a)
__snake_case : Tuple = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa)
__snake_case : Any = pipeline.to(__a)
pipeline.set_progress_bar_config(disable=__a)
__snake_case : Dict = torch.Generator(device='cpu').manual_seed(0)
__snake_case ,__snake_case : Optional[int] = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__snake_case : List[Any] = pipeline(
__a , image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
__snake_case : Tuple = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__a , __a) | 61 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the TimmBackbone model: maps each submodule to the
# public names it exports.  (The original assigned this dict to a throwaway
# name and then passed the undefined ``_import_structure`` to ``_LazyModule``,
# raising NameError at import time; it also overwrote the dict with a list
# instead of registering the torch-only modeling module.)
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is not installed: skip registering the torch-dependent module.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Test suite for the XLNet slow and fast tokenizers, driven by the shared tokenizer-test mixin."""
    # NOTE(review): this block appears machine-renamed — the four assignments
    # below rebind the same class attribute ``_snake_case``, method bodies
    # reference an undefined ``__a`` and rebind a single local
    # ``__snake_case``; confirm each line against the upstream test file.
    _snake_case = XLNetTokenizer
    _snake_case = XLNetTokenizerFast
    _snake_case = True
    _snake_case = True
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Build an XLNet tokenizer from the SentencePiece fixture and save it to the temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): ``__a`` and ``tokenizer`` are undefined in this scope;
        # presumably the fixture path and ``keep_accents=True`` — confirm.
        __snake_case : int = XLNetTokenizer(__a , keep_accents=__a)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """'<s>' must convert to id 1 and back."""
        __snake_case : Optional[int] = '<s>'
        __snake_case : Tuple = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a)
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Sanity-check the first/last vocab entries and the vocab size of the fixture model."""
        __snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<unk>')
        self.assertEqual(vocab_keys[1] , '<s>')
        self.assertEqual(vocab_keys[-1] , '<eod>')
        self.assertEqual(len(__a) , 1_0_0_6)
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """The fixture SentencePiece model exposes exactly 1000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Full tokenization round-trip with accents kept: pieces -> ids -> pieces."""
        __snake_case : Dict = XLNetTokenizer(__a , keep_accents=__a)
        __snake_case : List[Any] = tokenizer.tokenize('This is a test')
        self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
        __snake_case : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # Accents are kept, so the 'é' piece survives tokenization here.
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        __snake_case : Tuple = tokenizer.convert_tokens_to_ids(__a)
        self.assertListEqual(__a , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
        # Ids for out-of-vocab pieces map back to '<unk>'.
        __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(__a)
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """With lower-casing enabled, accents are folded and text is lowercased."""
        __snake_case : List[Any] = XLNetTokenizer(__a , do_lower_case=__a)
        __snake_case : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + '',
                'i',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ] , )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o'])
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Without lower-casing, case is preserved while accents are still folded."""
        __snake_case : int = XLNetTokenizer(__a , do_lower_case=__a)
        __snake_case : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ] , )
    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """build_inputs_with_special_tokens appends [SEP]=4 and [CLS]=3 (XLNet puts specials at the end)."""
        __snake_case : List[Any] = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        __snake_case : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=__a)
        __snake_case : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a)
        __snake_case : Dict = tokenizer.build_inputs_with_special_tokens(__a)
        __snake_case : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Integration check against a pinned revision of xlnet-base-cased: exact ids, token types and attention masks."""
        __snake_case : List[str] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
'''simple docstring'''
def factorial(num: int) -> int:
    """Return ``num!`` (the product of all integers from 1 to ``num``).

    The original block defined all three functions under one duplicated name,
    while the call sites below referenced ``factorial``/``split_and_add``/
    ``solution`` — an immediate NameError.  Names are restored to match the
    call sites.
    """
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number //= 10  # drop the last digit
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of ``num!`` (Project Euler style problem)."""
    return split_and_add(factorial(num))


if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Tokenization tests for MobileBERT (slow WordPiece and fast Rust tokenizers).

    NOTE(review): this block was machine-renamed — every method/attribute
    shared one duplicated name and bodies referenced an undefined ``__a``, so
    none of the tests could run.  Names below were restored from what each
    line demonstrably reads/writes (e.g. ``self.vocab_file``,
    ``self.tokenizers_list``, ``self.pre_trained_model_path``, loop variables,
    and the assertions' expected outputs); verify against the upstream test.
    """

    # Attributes consumed by the shared TokenizerTesterMixin machinery.
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self) -> None:
        """Write a tiny WordPiece vocab to disk and point the test tokenizers at it."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        """Raw/expected-decoded text pair used by the mixin's round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """The slow tokenizer splits into the expected WordPiece tokens and ids."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids, with and without lower-casing."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        """CJK characters are split into individual tokens."""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        """do_lower_case=True lowercases and strips accents by default (expected outputs below)."""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        """Lower-casing with strip_accents=False keeps the umlaut ('hällo')."""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        """Lower-casing with strip_accents=True folds the umlaut ('hallo')."""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        """Default accent handling under lower-casing strips accents."""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        """do_lower_case=False preserves the original casing."""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """Casing and accents both preserved ('HäLLo')."""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """Casing preserved, accents stripped ('HaLLo')."""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        """Tokens listed in never_split stay intact."""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        """WordPiece splits known words into sub-pieces and falls back to [UNK]."""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        """_is_whitespace recognizes ASCII and non-breaking spaces only."""
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        """_is_control flags control characters but not printable text or whitespace."""
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        """_is_punctuation covers ASCII symbols, not letters or spaces."""
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        """Soft-hyphen-only input tokenizes to an empty list (both tokenizers)."""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    @slow
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens wraps sequences in [CLS]=101 / [SEP]=102."""
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        """Offset mapping stays aligned with the produced tokens around accents/special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        """tokenize_chinese_chars toggles per-character splitting of CJK text."""
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class a_ ( unittest.TestCase ):
    """Helper that holds a ChineseCLIPImageProcessor configuration and builds random test images.

    The original block had duplicate ``__a`` parameter names in both method
    signatures (a SyntaxError), never bound the configuration onto ``self``
    although the methods below read ``self.batch_size`` etc., used the
    nonexistent ``np.uinta`` dtype, and referenced ``__a`` instead of the
    comprehension loop variable.  Parameter/attribute names are restored from
    the values each line reads; method names match the sibling test classes'
    call sites (``prepare_image_processor_dict`` / ``prepare_inputs``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073],
        image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711],
        do_convert_rgb=True,
    ) -> None:
        """Store the processor configuration; fill in default size/crop dicts."""
        # NOTE: the list defaults are only read, never mutated, so sharing
        # them across instances is safe here.
        size = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
        crop_size = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a ChineseCLIPImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images (default), numpy arrays, or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            # Every image shares the maximal resolution.
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        2_5_5, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # Each image gets its own random resolution within the allowed range.
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(2_5_5, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Tests ChineseCLIPImageProcessor on 3-channel inputs given as PIL images, numpy arrays and torch tensors."""
    # NOTE(review): this block appears machine-renamed — methods share one
    # duplicated name, locals rebind ``__snake_case`` and several lines read
    # the undefined name ``__a``; confirm each against the upstream test.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Create the shared tester that supplies processor configs and random images."""
        __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a)
    @property
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Processor constructor kwargs produced by the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """The processor must expose every expected configuration attribute."""
        __snake_case : int = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a , 'do_resize'))
        self.assertTrue(hasattr(__a , 'size'))
        self.assertTrue(hasattr(__a , 'do_center_crop'))
        self.assertTrue(hasattr(__a , 'center_crop'))
        self.assertTrue(hasattr(__a , 'do_normalize'))
        self.assertTrue(hasattr(__a , 'image_mean'))
        self.assertTrue(hasattr(__a , 'image_std'))
        self.assertTrue(hasattr(__a , 'do_convert_rgb'))
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """from_dict honors defaults and accepts size/crop_size overrides."""
        __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4})
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8})
        # Integer overrides are normalized into dict form by the processor.
        __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2})
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4})
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Intentionally empty placeholder (kept for parity with the shared test suite)."""
        pass
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Processing PIL images yields pixel tensors of the configured crop size."""
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image)
        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Processing numpy arrays yields pixel tensors of the configured crop size."""
        # Initialize image_processing
        __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray)
        # Test not batched input
        __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """Processing torch tensors yields pixel tensors of the configured crop size."""
        # Initialize image_processing
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor)
        # Test not batched input
        __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Tests ChineseCLIPImageProcessor with 4-channel inputs that are converted down to RGB."""
    # NOTE(review): machine-renamed block — ``__a`` is undefined below, and the
    # bare ``= 3`` assignment presumably sets
    # ``self.expected_encoded_image_num_channels`` (read in the PIL test);
    # confirm against the upstream transformers test file.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Create a tester producing 4-channel inputs; RGB conversion should yield 3 output channels."""
        __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a)
        __snake_case : List[Any] = 3
    @property
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Processor constructor kwargs produced by the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """The processor must expose every expected configuration attribute."""
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a , 'do_resize'))
        self.assertTrue(hasattr(__a , 'size'))
        self.assertTrue(hasattr(__a , 'do_center_crop'))
        self.assertTrue(hasattr(__a , 'center_crop'))
        self.assertTrue(hasattr(__a , 'do_normalize'))
        self.assertTrue(hasattr(__a , 'image_mean'))
        self.assertTrue(hasattr(__a , 'image_std'))
        self.assertTrue(hasattr(__a , 'do_convert_rgb'))
    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Intentionally empty placeholder (kept for parity with the shared test suite)."""
        pass
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """4-channel PIL inputs come out with the expected (RGB) channel count and crop size."""
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image)
        # Test not batched input
        __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

# Fail fast with an actionable message if the datasets dependency is too old.
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')

# Module-level logger for this example script.
__A = logging.getLogger(__name__)
@dataclass
class a_ :
    """Data-processing arguments for the XNLI example: max sequence length, cache overwrite, padding strategy, and per-split sample truncation for quick debugging runs."""
    # NOTE(review): every field below is bound to the same class attribute
    # ``_snake_case`` and carries no type annotation, so @dataclass registers
    # no fields at all — upstream these are distinct annotated attributes
    # (max_seq_length, overwrite_cache, pad_to_max_length, max_train_samples,
    # max_eval_samples, max_predict_samples); confirm and restore.
    _snake_case = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
@dataclass
class ModelArguments:
    """Arguments selecting which model/config/tokenizer to fine-tune.

    Fixes: same mangling as the data arguments — every field was named
    ``_snake_case`` (clobbering each other) and the class was anonymized, while
    ``main()`` references ``ModelArguments`` and ``model_args.language`` /
    ``train_language`` / ``config_name`` / ``cache_dir`` / ``model_revision`` /
    ``use_auth_token`` / ``ignore_mismatched_sizes`` etc.
    Defaults marked below were inferred from standard usage — confirm against
    the upstream example if exact defaults matter.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=None,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main() -> None:
    """Fine-tune and/or evaluate a sequence-classification model on XNLI.

    Fixes: the original body assigned everything to the throwaway name
    ``__snake_case`` while later statements referenced the real names
    (``parser``, ``model_args``, ``training_args``, ``log_level``,
    ``checkpoint``, ``metrics``, ``predictions`` …) — every one of those was a
    NameError at runtime. Also restored ``fp16`` (was ``fpaa``),
    ``id2label``/``label2id`` (were ``idalabel``/``labelaid``), and the
    function name ``main`` referenced by the ``__main__`` guard below.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli',
                model_args.language,
                split='train',
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                'xnli',
                model_args.train_language,
                split='train',
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features['label'].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli',
            model_args.language,
            split='validation',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features['label'].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli',
            model_args.language,
            split='test',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features['label'].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task='xnli',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['premise'],
            examples['hypothesis'],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on train dataset',
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on validation dataset',
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc='Running tokenizer on prediction dataset',
            )

    # Get the metric function
    metric = evaluate.load('xnli')

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='predict')
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""")


if __name__ == "__main__":
    main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a_ ( UpperCamelCase_ ):
    """Configuration class for a ViT-MSN model (stores the hyperparameters read
    back as ``self.<name>`` attributes below).

    Fixes: every ``__init__`` parameter in the original was named ``__a`` — a
    duplicate-argument SyntaxError. Parameter names were restored from the
    attribute assignments in the body; the default values keep the original
    signature's order (768, 12, 12, 3072, 'gelu', 0.0, 0.0, 0.02, 1e-06,
    224, 16, 3, True).
    """

    _snake_case = """vit_msn"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> Any:
        """Store the model hyperparameters and forward extra kwargs to the base config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX).
#
# Fixes: the original bound every result to the throwaway name `__A` while the
# following statements referenced the real names (`parser`, `args`, `pipe`,
# `device`, `sample`, `generator`, `image`, ...) — all NameErrors at runtime.
# Also restored `torch.bfloat16` (was the non-existent `torch.bfloataa`).
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last — IPEX's CPU kernels prefer NHWC memory layout
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; the dummy sample lets IPEX trace the UNet when supported
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # older IPEX versions do not accept sample_input — fall back without it
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute with a fixed seed so runs are reproducible
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__snake_case : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) )
return round(A , ndigits=2 )
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline | 61 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DistilBERT sub-package.
#
# Fixes: the original bound the base structure and every optional backend list
# to the throwaway name `__A`, so the lists were discarded instead of being
# added to `_import_structure` (undefined at the `_LazyModule(...)` call below),
# and the final `_LazyModule` was bound to `__A` instead of being installed
# into `sys.modules[__name__]`.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    """Tokenizer test-suite exercising RoBERTa's slow and fast tokenizers (BPE
    vocab/merges round-trip, special-token handling, offset mappings).

    NOTE(review): this file's identifiers were mechanically mangled — the four
    `_snake_case` class attributes below clobber each other (originally distinct
    names such as tokenizer_class / rust_tokenizer_class), and several method
    bodies reference the undefined name `__a` (flagged inline). Behavior as
    written will not match the upstream test file — confirm before relying on it.
    """

    _snake_case = RobertaTokenizer
    _snake_case = RobertaTokenizerFast
    _snake_case = True
    _snake_case = {"""cls_token""": """<s>"""}

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Write a tiny BPE vocab and merges file into the temp dir for the tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __snake_case : Optional[Any] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        # NOTE(review): `__a` below is undefined in this scope (mangled — almost
        # certainly the vocab list assigned above); this line raises NameError.
        __snake_case : Tuple = dict(zip(__a , range(len(__a))))
        __snake_case : List[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __snake_case : Dict = {'unk_token': '<unk>'}

        __snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        __snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            # NOTE(review): `__a` undefined here as well (mangled vocab dict).
            fp.write(json.dumps(__a) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            # NOTE(review): `__a` undefined here as well (mangled merges list).
            fp.write('\n'.join(__a))

    def SCREAMING_SNAKE_CASE__ (self , **__a) -> int:
        """Build a slow tokenizer from the temp fixture files, merging in the special-token map."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a)

    def SCREAMING_SNAKE_CASE__ (self , **__a) -> Optional[Any]:
        """Build a fast (Rust-backed) tokenizer from the temp fixture files."""
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a) -> Optional[int]:
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        __snake_case : Any = 'lower newer'
        __snake_case : Optional[int] = 'lower newer'
        return input_text, output_text

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Tokenize a simple string and check both the BPE tokens and their ids."""
        __snake_case : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
        __snake_case : Optional[Any] = 'lower newer'
        __snake_case : Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        __snake_case : Optional[int] = tokenizer.tokenize(__a)  # , add_prefix_space=True)
        self.assertListEqual(__a , __a)

        __snake_case : List[Any] = tokens + [tokenizer.unk_token]
        __snake_case : Optional[int] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Check encoding of known sentences against hard-coded roberta-base token ids."""
        __snake_case : Union[str, Any] = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__a) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__a) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Verify build_inputs_with_special_tokens matches encode() with specials added."""
        __snake_case : Tuple = self.tokenizer_class.from_pretrained('roberta-base')

        __snake_case : int = tokenizer.encode('sequence builders' , add_special_tokens=__a)
        __snake_case : Any = tokenizer.encode('multi-sequence build' , add_special_tokens=__a)

        __snake_case : Union[str, Any] = tokenizer.encode(
            'sequence builders' , add_special_tokens=__a , add_prefix_space=__a)
        __snake_case : List[Any] = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=__a , add_prefix_space=__a)

        __snake_case : str = tokenizer.build_inputs_with_special_tokens(__a)
        __snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a , __a)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Exercise space/prefix handling around special tokens (notably <mask>)."""
        __snake_case : str = self.get_tokenizer()

        __snake_case : Any = 'Encode this sequence.'
        __snake_case : Dict = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        __snake_case : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a)
        __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(__a , __a)

        __snake_case : Tuple = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a)
        __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(__a , __a)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        __snake_case : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a)
        __snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(__a , __a)

        # Testing spaces after special tokens
        __snake_case : Dict = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(__a , lstrip=__a , rstrip=__a)})  # mask token has a left space
        __snake_case : List[str] = tokenizer.convert_tokens_to_ids(__a)

        __snake_case : int = 'Encode <mask> sequence'
        __snake_case : List[Any] = 'Encode <mask>sequence'

        __snake_case : str = tokenizer.encode(__a)
        __snake_case : str = encoded.index(__a)
        __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(__a , __a)

        __snake_case : Dict = tokenizer.encode(__a)
        __snake_case : int = encoded.index(__a)
        __snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(__a , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Intentionally empty — presumably skips an inherited check not applicable here; TODO confirm."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """Compare slow vs fast tokenizers on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a)
                __snake_case : Dict = self.tokenizer_class.from_pretrained(__a , **__a)
                __snake_case : Any = 'A, <mask> AllenNLP sentence.'
                __snake_case : Optional[int] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a)
                __snake_case : int = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )

                __snake_case : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                __snake_case : int = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])

                self.assertSequenceEqual(
                    __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    __a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Check add_prefix_space / trim_offsets survive serialization of the fast tokenizer."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
            __snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)

            __snake_case : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            __snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __a)

            self.assertEqual(post_processor_state['add_prefix_space'] , __a)
            self.assertEqual(post_processor_state['trim_offsets'] , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Check offset mappings under every add_prefix_space / trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                __snake_case : List[Any] = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                __snake_case : str = F"""{text_of_1_token} {text_of_1_token}"""

                __snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__a) + 1, len(__a) + 1 + len(__a)) , )

                __snake_case : str = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__a) + 1, len(__a) + 1 + len(__a)) , )

                __snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : Optional[int] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__a), len(__a) + 1 + len(__a)) , )

                __snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : List[str] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__a), len(__a) + 1 + len(__a)) , )

                __snake_case : Any = F""" {text}"""

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                __snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : List[str] = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__a) + 1, 1 + len(__a) + 1 + len(__a)) , )

                __snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__a), 1 + len(__a) + 1 + len(__a)) , )

                __snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a)
                __snake_case : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__a), 1 + len(__a) + 1 + len(__a)) , )
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the path was assigned to `__A` but inserted as the undefined name
# `git_repo_path`, raising NameError at collection time.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str:
    """Register the shared diffusers pytest command-line options on ``A`` (the pytest parser)."""
    # Imported lazily so pytest can collect this conftest even before diffusers is installed.
    from diffusers.utils.testing_utils import pytest_addoption_shared as _add_shared_options

    _add_shared_options(A)
def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]:
    """pytest_terminal_summary hook body: emit the extra report files when --make-reports is set.

    ``A`` is the pytest terminalreporter. Bug fixes: the original body read the
    option from the undefined name ``terminalreporter`` instead of ``A``, and
    passed the reporter itself as ``id=`` where the report id (the value of the
    ``--make-reports`` option) is expected.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = A.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(A , id=make_reports )
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Restored names: every constant below was bound to `__A` (each assignment
# clobbering the previous one) while the functions in this script reference
# them as `logger`, `new_layer_name_dict`, `REMOTE_MODEL_PATHS`,
# `default_cache_dir` and `CACHE_DIR`.
logger = logging.get_logger(__name__)

set_seed(770)

# Maps GPT-style parameter names in the original suno/bark checkpoints to the
# names used by the HF Bark implementation (applied to state-dict keys).
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original suno/bark checkpoints, keyed by model type
# (with a "_small" suffix for the small variants).
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Local cache directory for the downloaded original checkpoints.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the original bark checkpoint for `model_type`.

    Bug fixes: the original def used the duplicate parameter name `A` twice
    (a SyntaxError) while the body referenced `model_type`; the name is
    restored to `_get_ckpt_path`, which is how the caller below refers to it.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_hub, file_name):
    """Download `file_name` from the given Hub repo into the local CACHE_DIR.

    Bug fixes: duplicate `A` parameters (SyntaxError) restored to distinct
    names; the name is restored to `_download`, which is how `_load_model`
    calls it.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load one original bark sub-checkpoint and convert it into an HF Bark sub-model.

    Bug fixes: duplicate `A` parameters (SyntaxError) and local assignments
    garbled to `__snake_case` while later lines read the real names
    (`ModelClass`, `model`, `state_dict`, ...); restored throughout. The name
    is restored to `_load_model`, which is how `load_model` calls it.

    Raises NotImplementedError for unknown model types and ValueError when the
    converted state dict has unexpected extra/missing keys.
    """
    # Pick the HF model/config/generation-config classes for this sub-model.
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])

    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints only carry a single "vocab_size".
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and rename GPT layers.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    # strict=False because the ".attn.bias" buffers were deliberately excluded above.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one bark sub-model, verify it against the original, and save it.

    Bug fixes: duplicate `A` parameters (SyntaxError) and locals bound to
    `__snake_case` while later lines read `model`, `bark_model`, `batch_size`,
    etc.; restored throughout. The name is restored to `load_model`, which is
    how the argparse entry point below calls it.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def _SCREAMING_SNAKE_CASE(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the three converted sub-models plus Encodec into one BarkModel and save/push it.

    Bug fixes: six duplicate `A` parameters (SyntaxError) restored to distinct
    names inferred from how the body uses them, and locals bound to
    `__snake_case` while later lines read `semantic`, `coarseAcoustic`,
    `fineAcoustic` (L13080); restored so the sub-models are actually attached
    to the assembled `bark` model instead of being discarded.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # Bug fix: the parser was assigned to `__A` while the following lines
    # reference `parser` and `args` (NameError); restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for the BioGPT sub-package. Bug fixes: the import
# structure and the modeling list were bound to `__A` (so `_import_structure`
# at the bottom was undefined and the torch symbols were discarded), and the
# constructed _LazyModule was never installed into sys.modules, so importing
# this package exported nothing.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image to a [-1, 1] NCHW float32 torch tensor, snapping
    its size down to a multiple of 32.

    Bug fixes: the parameter was named `A` while the body reads `image` and
    the locals `w`/`h` (NameError); `np.floataa` is the garbled `np.float32`.
    The name is restored to `preprocess`, which is how the pipeline below
    calls it.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a_ ( DiffusionPipeline ):
    r"""Latent-diffusion super-resolution pipeline: denoises in VQ-VAE latent
    space conditioned on the low-resolution input image, then decodes.

    Bug fixes: the base class `UpperCamelCase_` was undefined (restored to the
    imported DiffusionPipeline); both method signatures used duplicate `__a`
    parameters (SyntaxError); locals were bound to `__snake_case` while later
    lines read `latents`, `height`, `width`, `timesteps_tensor`, etc. —
    restored throughout.
    """

    def __init__(self, vqvae, unet, scheduler) -> None:
        """Register the VQ model, denoising UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image = None,
        batch_size = 1,
        num_inference_steps = 100,
        eta = 0.0,
        generator = None,
        output_type = "pil",
        return_dict = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Upscale `image` (PIL image or NCHW tensor); returns PIL images by default."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE(method):
    """Decorator that fires an accelerate offload pre-forward hook (if attached
    to `self`) before running forward-style methods.

    Bug fixes: the parameter was named `A` while the body returned/called the
    undefined name `method`; and the version guard parsed `A` (the decorated
    function object) instead of the accelerate version string. Accelerate
    < 0.17.0 has no compatible hook API, so the method is returned unwrapped.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test suite for BlenderbotSmallTokenizer using a tiny toy BPE vocab.

    Bug fixes: the mixin base `UpperCamelCase_` was undefined (restored to the
    imported TokenizerTesterMixin); the two class attributes were both named
    `_snake_case` (second clobbered the first) — restored to the mixin's
    standard `tokenizer_class` / `test_rust_tokenizer` names; every method was
    named `SCREAMING_SNAKE_CASE__` so only the last survived — restored to
    distinct names (`setUp` is grounded by its `super().setUp()` call, the
    test methods need the `test_` prefix for discovery); and `setUp` discarded
    its values into `__snake_case` locals while later code reads
    `self.special_tokens_map`, `self.vocab_file` and `self.merges_file`.
    """

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a tokenizer from the toy files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the text-to-speech agent tool.

    Bug fixes: the mixin base `UpperCamelCase_` was undefined (restored to the
    imported ToolTesterMixin); setUp discarded the tool into a local while the
    next line reads `self.tool`; the test bodies bound their values to
    `__snake_case` while reading `result`; and all three methods shared the
    name `SCREAMING_SNAKE_CASE__` (only the last survived) — restored to
    `setUp` plus distinct `test_` names for discovery.
    """

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Seed torch so the generated waveform is deterministic.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]),
            )
        )
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a_ ( unittest.TestCase ):
    # NOTE(review): this class is name-mangled. All four slow-test methods and the
    # two error-path methods share the name `SCREAMING_SNAKE_CASE__`, so only the
    # last definition survives and none carry the `test_` prefix unittest needs
    # for discovery. Several bodies also reference `__a` where no such parameter
    # exists (e.g. `self.subTest(__a)`, `assertIsInstance(__a , __a)`,
    # `assertRaisesRegex(__a , ...)`) — presumably `model_name`, the loaded
    # config/model, and an exception class respectively; verify against the
    # upstream FlaxAutoModel test before renaming.
    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        # Round-trips two BERT checkpoints through AutoConfig / FlaxAutoModel.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(__a):
                __snake_case : int = AutoConfig.from_pretrained(__a)
                self.assertIsNotNone(__a)
                self.assertIsInstance(__a , __a)

                __snake_case : Any = FlaxAutoModel.from_pretrained(__a)
                self.assertIsNotNone(__a)
                self.assertIsInstance(__a , __a)

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """simple docstring"""
        # Same round-trip for two RoBERTa checkpoints.
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(__a):
                __snake_case : Union[str, Any] = AutoConfig.from_pretrained(__a)
                self.assertIsNotNone(__a)
                self.assertIsInstance(__a , __a)

                __snake_case : Union[str, Any] = FlaxAutoModel.from_pretrained(__a)
                self.assertIsNotNone(__a)
                self.assertIsInstance(__a , __a)

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """simple docstring"""
        # Checks a jax.jit-wrapped BERT forward pass runs to completion.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            __snake_case : Optional[int] = AutoTokenizer.from_pretrained(__a)
            __snake_case : Optional[int] = FlaxBertModel.from_pretrained(__a)
            __snake_case : Optional[int] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**__a):
                return model(**__a)

            eval(**__a).block_until_ready()

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """simple docstring"""
        # Same jitted-forward check for RoBERTa.
        for model_name in ["roberta-base", "roberta-large"]:
            __snake_case : Optional[Any] = AutoTokenizer.from_pretrained(__a)
            __snake_case : List[Any] = FlaxRobertaModel.from_pretrained(__a)
            __snake_case : Any = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**__a):
                return model(**__a)

            eval(**__a).block_until_ready()

    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """simple docstring"""
        # Error path: an invalid repo id should raise with this message.
        with self.assertRaisesRegex(
            __a , 'bert-base is not a local folder and is not a valid model identifier'):
            __snake_case : List[str] = FlaxAutoModel.from_pretrained('bert-base')

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """simple docstring"""
        # Error path: an invalid git revision should raise with this message.
        with self.assertRaisesRegex(
            __a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            __snake_case : Dict = FlaxAutoModel.from_pretrained(__a , revision='aaaaaa')

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        # Error path: a repo with a config but no flax weights.
        with self.assertRaisesRegex(
            __a , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            __snake_case : int = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """simple docstring"""
        # Error path: a PyTorch-only repo loaded without from_pt=True.
        with self.assertRaisesRegex(__a , 'Use `from_pt=True` to load this model'):
            __snake_case : Optional[Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
'''simple docstring'''
import math
class Graph:
    """Dense directed graph with all-pairs shortest paths via Floyd-Warshall.

    Bug fixes: the class was named `a_` while the __main__ block instantiates
    `Graph`; `__init__` bound its matrices correctly but discarded `n` (the
    body reads `self.n`); `add_edge` discarded the weight instead of writing
    `self.dp[u][v]`; and all three methods shared one garbled name while the
    __main__ block calls `add_edge` / `floyd_warshall` / `show_min` — all
    restored. Also sets dp[i][i] = 0 so a node's distance to itself is 0
    rather than infinity.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight; math.inf marks "no edge"
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        for i in range(0, n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add (or overwrite) the directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest-path distance u -> v (run floyd_warshall first)."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Bug fix: the instance was assigned to `__A` while every following line
    # references `graph` (NameError); restored.
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)  # shortest 1 -> 4 (11 for this graph)
    graph.show_min(0, 3)  # shortest 0 -> 3 (16 for this graph)
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class a_ ( ABC ):
    """Abstract base for readers that load data files into a Dataset/DatasetDict.

    Bug fixes: the base class `UpperCamelCase_` was undefined (restored to the
    imported ABC); the constructor used duplicate `__a` parameter names
    (SyntaxError) and discarded every argument into `__snake_case` locals —
    restored to `self.*` attributes with names taken from the body's own
    references (`split`, `isinstance(path_or_paths, dict)`).
    """

    def __init__(
        self,
        path_or_paths = None,
        split = None,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        # Default to "train" unless a split was given or path_or_paths maps split -> files.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Materialize and return the dataset(s)."""
        pass
class a_ ( ABC ):
    """Abstract base for readers that load an in-memory input into a Dataset.

    Bug fixes: undefined base `UpperCamelCase_` restored to ABC; duplicate
    `__a` constructor parameters (SyntaxError) restored to distinct names; the
    arguments were discarded into `__snake_case` locals instead of being
    stored on the instance.
    """

    def __init__(
        self,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """Materialize and return the dataset."""
        pass
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class a_ ( BaseImageProcessor ):
    r"""ConvNeXt-style image processor: resize (with crop_pct handling below a
    384px shortest edge), rescale to [0, 1] and normalize.

    Bug fixes: undefined base `UpperCamelCase_` restored to the imported
    BaseImageProcessor; every method signature used duplicate `__a` parameters
    (SyntaxError); `__init__` discarded its arguments into `__snake_case`
    locals while `preprocess` reads `self.do_resize`, `self.crop_pct`, etc.;
    intermediate results in `resize`/`preprocess` were likewise discarded.
    The class attribute (bound to `_snake_case`) is restored to the standard
    `model_input_names` of image processors.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        crop_pct = None,
        resample = PILImageResampling.BILINEAR,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        crop_pct,
        resample = PILImageResampling.BICUBIC,
        data_format = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`. Below 384px: resize to shortest_edge/crop_pct then
        center-crop; at 384px or larger: warp directly (no cropping)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image, scale, data_format = None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        crop_pct = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/rescale/normalize pipeline to `images`
        and return a BatchFeature of pixel_values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
from string import ascii_uppercase
# Letter <-> index lookup tables over the 26 uppercase ASCII letters.
# Bug fix: both tables were bound to the same name `__A`, so the second
# assignment clobbered the first; they are given distinct names here.
CHAR_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}  # 'A' -> 0 ... 'Z' -> 25
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))  # 0 -> 'A' ... 25 -> 'Z'
def generate_key(message: str, key: str) -> str:
    """Stretch `key` by repeating its own characters until it is as long as `message`.

    Bug fixes: the original def used the duplicate parameter name `A` twice
    (SyntaxError) while the body read `key`; the name is restored to
    `generate_key`, which is how `main` below calls it.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0  # wrap the self-repeat index
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the stretched key `key_new`.

    Spaces are preserved; for the i-th non-space letter the cipher letter is
    (plain_index - key_index) mod 26. Bug fixes: duplicate `A` parameters
    (SyntaxError) restored, the name is restored to `cipher_text` (how `main`
    calls it), and the body referenced a single table `dicta` for both
    directions of lookup — distinct local tables are built here so the
    function does not depend on the broken module-level constants.
    """
    char_to_idx = {char: i for i, char in enumerate(ascii_uppercase)}
    idx_to_char = dict(enumerate(ascii_uppercase))
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_idx[letter] - char_to_idx[key_new[i]]) % 26
            i += 1
            encrypted += idx_to_char[x]
    return encrypted
def _SCREAMING_SNAKE_CASE ( A : str , A : str ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = ''
__snake_case : Optional[Any] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__snake_case : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
__snake_case : List[str] = 'THE GERMAN ATTACK'
__snake_case : Optional[int] = 'SECRET'
__snake_case : Any = generate_key(A , A )
__snake_case : Optional[int] = cipher_text(A , A )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(A , A )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 61 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! (factorial), memoized via lru_cache.

    Raises ValueError for negative input.

    >>> factorial(0)
    1
    >>> factorial(5)
    120
    """
    # Original code declared the parameter as `A` but read `num` (NameError),
    # and the recursive call already targeted the name `factorial`.
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# NOTE(review): two distinct module-level names were collapsed onto `__A` here —
# the first assignment (the logger) is immediately overwritten by the config map.
# Presumably these were `logger` and `RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`;
# confirm against the upstream file before renaming.
__A = logging.get_logger(__name__)
# TODO: upload to AWS
# Map of canonical checkpoint name -> hosted config URL.
__A = {
    '''yjernite/retribert-base-uncased''': (
        '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
    ),
}
class RetriBertConfig(PretrainedConfig):
    """Configuration for the RetriBERT dual-encoder model.

    The original block declared every __init__ parameter as `__a`
    (a SyntaxError: duplicate argument names); the parameter names below are
    restored from the attribute assignments in the body.
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,          # size of the token vocabulary
        hidden_size=768,            # encoder hidden dimension
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,    # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,        # whether query/doc towers share weights
        projection_dim=128,         # output projection dimension
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Unit tests for diffusers' VQModel.

    Restored from a name-collapsed original: the two class attributes and all
    method names had been folded onto single identifiers (so most definitions
    shadowed each other), and one annotated tuple-unpack was a SyntaxError.
    """

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A random (4, 3, 32, 32) sample on the test device."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward inputs) for the shared mixin tests."""
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Intentionally skipped for VQModel (covered by the mixins elsewhere).
        pass

    def test_training(self):
        # Intentionally skipped for VQModel.
        pass

    def test_from_pretrained_hub(self):
        # Load a tiny hub checkpoint and verify it produces output.
        model, loading_info = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        # Deterministic forward pass against a stored reference slice.
        model = VQModel.from_pretrained('fusing/vqgan-dummy')
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """In CONNECTION_TIMES_OUT mode, requests hang (simulated) or time out.

    The collapsed original named all three tests identically (so two were
    shadowed) and passed an undefined `A` to pytest.raises; the imported-but-
    unused RequestWouldHangIndefinitelyError is clearly the intended exception.
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """In CONNECTION_FAILS mode every HTTP request raises ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode():
    """With HF_DATASETS_OFFLINE=1, http_head must refuse to hit the network.

    The original passed an undefined `A` to pytest.raises; ConnectionError is
    the expected type here — NOTE(review): confirm against datasets' offline
    helper, which raises a ConnectionError subclass in this mode.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune from.

    Restored field names (the original collapsed every field onto `_snake_case`
    with no annotation, so the dataclass had effectively one field); names are
    taken from the attribute reads in main(): model_name_or_path, config_name,
    task_type, tokenizer_name, use_fast, cache_dir.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments for the data the model is trained and evaluated on.

    Field names restored from the reads in main(): data_dir, labels,
    max_seq_length, overwrite_cache (the original collapsed them all onto
    un-annotated `_snake_case`, which is not a valid dataclass field).
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Train/evaluate/predict a token-classification model (CoNLL-2003 style).

    Restored from a name-collapsed original: every local was assigned to
    `__snake_case` but read under its real name, and annotated tuple-unpacks
    (`a, b: T = ...`) were SyntaxErrors.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.')

    # The task class (e.g. NER, POS) is looked up dynamically in the local `tasks` module.
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Convert logits to label strings, dropping positions whose gold label
        # is the CrossEntropyLoss ignore_index (sub-word/padding positions).
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        # `fa_score` is this file's import alias for seqeval's F1 metric.
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index: int) -> None:
    """Entry point for xla_spawn (TPUs): each spawned process just runs main()."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
"""Playfair cipher: encode/decode with a 5x5 key table (I and J share a cell)."""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive tuples of *size* items from *seq* (last may be short)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, separate doubled letters with X, pad to even length."""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 25-letter Playfair table: key letters first, then the rest."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt *plaintext* with the Playfair cipher under *key*."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair *ciphertext* under *key* (inverse of encode's shifts)."""
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort *input_list* in place with odd-even (brick) sort and return it.

    Alternates compare-swap passes over even then odd index pairs until a full
    sweep makes no swap. (Restored name: the __main__ block below calls
    `odd_even_sort`, which the obfuscated original never defined.)
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: module name -> public names it exports.
# (Restored: the obfuscated original overwrote this dict with bare lists and
# then passed an undefined `_import_structure` to _LazyModule.)
_import_structure = {
    '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
    '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}

# Vision-dependent exports are only registered when vision deps are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']

# Torch-dependent (modeling) exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_maskformer'''] = [
        '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MaskFormerForInstanceSegmentation''',
        '''MaskFormerModel''',
        '''MaskFormerPreTrainedModel''',
    ]
    _import_structure['''modeling_maskformer_swin'''] = [
        '''MaskFormerSwinBackbone''',
        '''MaskFormerSwinModel''',
        '''MaskFormerSwinPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime _LazyModule defers them.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
) -> None:
    """Copy weights from the matching timm LeViT checkpoint into an HF model.

    The original declared five parameters all named `A` (SyntaxError); names
    are restored from the body's uses and the call sites below. Keys are
    matched positionally between the two state dicts.
    """
    print(f"""Converting {name}...""")
    with torch.no_grad():
        # Pick the pretrained timm model that corresponds to this config.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Positional key mapping: both state dicts must enumerate in the same order.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must agree on a random input.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or all) LeViT checkpoints and optionally push them.

    Restored from a name-collapsed original: `ImageNetPreTrainedConfig` and
    `convert_weight_and_push` were referenced but never defined, and the
    duplicate `A` parameters were a SyntaxError.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'

    # Build the ImageNet id<->label maps for the classification head.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Partially-applied config factory with the label maps baked in.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''levit-dump-folder/''',
        type=Path,
        required=False,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    parser.add_argument(
        '''--no-push_to_hub''',
        dest='''push_to_hub''',
        action='''store_false''',
        help='''Do not push model and image processor to the hub''',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    """Tiny 3->4->5 MLP with a batchnorm, used as a fixture by the hook tests.

    Restored: the original collapsed both linears onto one attribute (so the
    second overwrote the first) and referenced the nonexistent `nn.BatchNormad`;
    BatchNorm1d matches the (batch, features) inputs used by the tests below.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        # (N, 3) -> (N, 4) -> batchnorm -> (N, 5)
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    """Test hook that adds 1 to the first positional argument before forward.

    Restored name/base: the tests below instantiate `PreForwardHook`, and
    `ModelHook` is imported at the top of the file.
    """

    def pre_forward(self, module, *args, **kwargs):
        # Only the first positional arg is modified; everything else passes through.
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    """Test hook that adds 1 to the module's output after forward.

    Restored name/base: the tests below instantiate `PostForwardHook`, and
    `ModelHook` is imported at the top of the file.
    """

    def post_forward(self, module, output):
        return output + 1
class a_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
__snake_case : Dict = ModelForTest()
__snake_case : Dict = ModelHook()
add_hook_to_module(__a , __a)
self.assertEqual(test_model._hf_hook , __a)
self.assertTrue(hasattr(__a , '_old_forward'))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['x'])
remove_hook_from_module(__a)
self.assertFalse(hasattr(__a , '_hf_hook'))
self.assertFalse(hasattr(__a , '_old_forward'))
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : List[Any] = ModelHook()
add_hook_to_module(__a , __a)
add_hook_to_module(__a , __a , append=__a)
self.assertEqual(isinstance(test_model._hf_hook , __a) , __a)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(__a , '_old_forward'))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['x'])
remove_hook_from_module(__a)
self.assertFalse(hasattr(__a , '_hf_hook'))
self.assertFalse(hasattr(__a , '_old_forward'))
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Union[str, Any] = torch.randn(2 , 3)
__snake_case : Dict = test_model(x + 1)
__snake_case : int = test_model(x + 2)
__snake_case : Optional[Any] = PreForwardHook()
add_hook_to_module(__a , __a)
__snake_case : int = test_model(__a)
self.assertTrue(torch.allclose(__a , __a , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Dict = PreForwardHook()
add_hook_to_module(__a , __a)
__snake_case : str = test_model(__a)
self.assertTrue(torch.allclose(__a , __a , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
__snake_case : Dict = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(__a , __a)
__snake_case : Tuple = test_model(__a)
assert torch.allclose(__a , __a , atol=1E-5)
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = ModelForTest()
__snake_case : Optional[int] = torch.randn(2 , 3)
__snake_case : int = test_model(__a)
__snake_case : Any = PostForwardHook()
add_hook_to_module(__a , __a)
__snake_case : List[str] = test_model(__a)
self.assertTrue(torch.allclose(__a , output + 1 , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Tuple = PostForwardHook()
add_hook_to_module(__a , __a)
__snake_case : Optional[int] = test_model(__a)
self.assertTrue(torch.allclose(__a , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
__snake_case : List[Any] = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(__a , __a)
__snake_case : Optional[int] = test_model(__a)
assert torch.allclose(__a , output + 2 , atol=1E-5)
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Dict = torch.randn(2 , 3)
__snake_case : List[str] = test_model(__a)
__snake_case : List[str] = PostForwardHook()
add_hook_to_module(__a , __a)
__snake_case : int = test_model(__a)
self.assertTrue(torch.allclose(__a , output + 1))
self.assertTrue(outputa.requires_grad)
__snake_case : List[str] = True
__snake_case : Any = test_model(__a)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Union[str, Any] = torch.randn(2 , 3)
__snake_case : str = model(__a)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a))
__snake_case : List[Any] = torch.randn(2 , 3).to(0)
__snake_case : str = model(__a)
self.assertEqual(output.device , torch.device(0))
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
__snake_case : str = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a))
add_hook_to_module(model.lineara , AlignDevicesHook(**__a))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(hook_kwargs['execution_device'])
self.assertEqual(model.batchnorm.running_mean.device , __a)
__snake_case : List[str] = torch.randn(2 , 3)
__snake_case : int = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
__snake_case : Optional[Any] = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a))
add_hook_to_module(model.lineara , AlignDevicesHook(**__a))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
__snake_case : Dict = torch.randn(2 , 3)
__snake_case : Optional[Any] = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
__snake_case : Any = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__a , execution_device=__a , offload=__a)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Dict = torch.device(__a)
self.assertEqual(model.batchnorm.running_mean.device , __a)
__snake_case : Dict = torch.randn(2 , 3)
__snake_case : List[Any] = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
__snake_case : List[str] = torch.randn(2 , 3)
__snake_case : int = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__a)
self.assertEqual(model.batchnorm.running_mean.device , __a)
__snake_case : Optional[int] = torch.randn(2 , 3)
__snake_case : Dict = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
__snake_case : int = torch.randn(2 , 3)
__snake_case : Tuple = model(__a)
self.assertEqual(output.device , __a)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu')) | 61 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Optional[int] = batch_size
__snake_case : Optional[Any] = image_size
__snake_case : Optional[int] = patch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Optional[Any] = is_training
__snake_case : Tuple = use_labels
__snake_case : Optional[int] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Dict = type_sequence_label_size
__snake_case : str = initializer_range
__snake_case : int = encoder_stride
__snake_case : List[str] = num_attention_outputs
__snake_case : Optional[Any] = embed_dim
__snake_case : Optional[Any] = embed_dim + 1
__snake_case : List[str] = resolution
__snake_case : Optional[int] = depths
__snake_case : List[Any] = hidden_sizes
__snake_case : List[str] = dim
__snake_case : Union[str, Any] = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case : List[str] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a)
__snake_case : int = model(__a , training=__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple:
"""simple docstring"""
__snake_case : Dict = self.type_sequence_label_size
__snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
__snake_case : Optional[int] = model(__a , labels=__a , training=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__snake_case : List[Any] = 1
__snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
__snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__snake_case : str = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs
__snake_case : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_snake_case = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : Dict = TFEfficientFormerModelTester(self)
__snake_case : List[Any] = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=3_7)
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(__a)
__snake_case : Union[str, Any] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a)
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a):
__snake_case : str = model_class(__a)
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__a) , __a)
if hasattr(self.model_tester , 'encoder_seq_length'):
__snake_case : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
__snake_case : str = seq_length * self.model_tester.chunk_length
else:
__snake_case : Optional[int] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case : List[Any] = outputs.decoder_hidden_states
self.asseretIsInstance(__a , (list, tuple))
self.assertEqual(len(__a) , __a)
__snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__a , __a , __a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int:
"""simple docstring"""
__snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
__snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
__snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
__snake_case : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = True
__snake_case : Dict = False
__snake_case : Optional[int] = True
__snake_case : Dict = model_class(__a)
__snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Dict = True
__snake_case : str = model_class(__a)
__snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case : Tuple = model_class(__a)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case : Tuple = model(__a)
self.assertTrue(outputs_dict is not None)
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : List[str] = model(**__a , training=__a)
# verify the logits
__snake_case : str = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__snake_case : List[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : Optional[int] = model(**__a , training=__a)
# verify the logits
__snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) | 61 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 61 |
'''simple docstring'''
__A = {str(digit): digit**5 for digit in range(1_0)}
def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution()) | 61 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    # Fixed from the doubled "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES": no auto mapping is named with a
    # repeated FOR_FOR, and the hasattr() guard in the table updater would silently skip the bad name,
    # so image-to-text models never got a pipeline tag.
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , A )
return [m.group(0 ) for m in matches]
def _SCREAMING_SNAKE_CASE() -> pd.DataFrame:
    """Build the frameworks table: one row per model type, with boolean flags for
    PyTorch/TensorFlow/Flax support and the preferred processing class.

    Returns:
        A ``pd.DataFrame`` with columns ``model_type``, ``pytorch``, ``tensorflow``,
        ``flax`` and ``processor``.
        NOTE(review): column names restored to match the published
        huggingface/transformers-metadata dataset -- confirm against that repo.
    """
    # Fixed: every local here was bound to a single throwaway name while later
    # statements read the real names (pt_models, attr_name, ...), and `A` was
    # referenced inside a zero-argument function; both raised NameError.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # Map each config-class prefix (e.g. "Bert" from "BertConfig") to its model type.
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    # defaultdict(bool) so model types without a backend read as False.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            # _re_pt_models also matches TF/Flax names, so this branch must come last.
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            # Strip trailing camel-case words until the prefix matches a known model type.
            # NOTE(review): relies on the camel-case splitter defined above in this
            # file being reachable as `camel_case_split` -- confirm the name.
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now attach the preferred processing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def _SCREAMING_SNAKE_CASE(table):
    """Update ``table`` with pipeline tags and auto classes derived from the
    auto-model mappings listed in ``PIPELINE_TAGS_AND_AUTO_MODELS``.

    Args:
        table: dict mapping a model class name to a ``(pipeline_tag, auto_class)`` tuple.

    Returns:
        The same dict, updated in place.
    """
    # Fixed: locals were bound to a throwaway name while `table`/`model_names`
    # were read unbound, and zip/hasattr/getattr/isinstance all received an
    # undefined `A`; reconstructed from the intact reference sites.
    all_auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(all_auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    # Some mapping values are tuples/lists of several class names.
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def _SCREAMING_SNAKE_CASE ( token : Optional[Any] , commit_sha : List[Any] ) -> Dict:
    """Rebuild the frameworks/pipeline-tags metadata and push both JSON files
    to the ``huggingface/transformers-metadata`` dataset repo.

    NOTE(review): minification had left two parameters both named ``A`` (a
    SyntaxError) and discarded the local bindings; names are restored from
    the use sites (``frameworks_dataset``, ``tags_dataset``, ``table``, ...).
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )
        if commit_sha is not None:
            commit_message = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )


# Restore the public name used by the __main__ guard below.
update_metadata = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """Verify every pipeline task in ``SUPPORTED_TASKS`` is covered by the
    ``PIPELINE_TAGS_AND_AUTO_MODELS`` constant; raise ValueError listing the
    missing tags otherwise.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                # Several PT classes may be registered; compare on the first.
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F"""`utils/update_metadata.py`: {msg}. Please add them!""" )


# Restore the public name used by the __main__ guard below.
check_pipeline_tags = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # CLI entry point: either verify pipeline-tag coverage (--check-only) or
    # push a refreshed metadata dataset.  The parser/args bindings had been
    # collapsed onto a single placeholder name by minification.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
    parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
    parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a_ :
    # Binary-tree node; the traversal helpers below read exactly these three
    # attributes.  The field names had been collapsed by minification and are
    # restored from those use sites (`root.data`, `root.left`, `root.right`).
    data: int
    left: a_ | None = None
    right: a_ | None = None


# The rest of this script constructs nodes via the name `Node`; restore it.
Node = a_
def _SCREAMING_SNAKE_CASE ( ) -> Node | None:
    """Build the small fixture tree used by ``main``.

    NOTE(review): the child-link assignments were lost in minification; the
    shape below (1 at the root, 2/3 as children, 4/5 under 2) is the
    conventional fixture for these traversal demos -- confirm against the
    expected traversal output.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree


# Restore the public name used by `main`.
make_tree = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """Pre-order traversal: current node, then left subtree, then right subtree."""
    # The body already referred to the parameter as `root` and recursed via
    # `preorder`; both bindings are restored (parameter name + alias below).
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


preorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """Post-order traversal: left subtree, then right subtree, then the node."""
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


postorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> list[int]:
    """In-order traversal: left subtree, then the node, then right subtree."""
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


inorder = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0


height = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> Sequence[Node | None]:
    """Breadth-first traversal: node payloads, level by level, left to right."""
    output : list[Any] = []
    if root is None:
        return output
    # FIFO queue of nodes still to visit.
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output


level_order = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Collect payloads of the nodes at ``level`` (root is level 1), left to right."""
    output : list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output


get_nodes_from_left_to_right = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None , level : int ) -> Sequence[Node | None]:
    """Collect payloads of the nodes at ``level`` (root is level 1), right to left."""
    output : list[Any] = []

    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            # Right subtree first gives the right-to-left order.
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output


get_nodes_from_right_to_left = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( root : Node | None ) -> Sequence[Node | None] | list[Any]:
    """Zig-zag level order: odd levels left-to-right, even levels right-to-left."""
    if root is None:
        return []
    output : list[Sequence[Node | None]] = []
    # flag == 0 -> next level is emitted left-to-right; 1 -> right-to-left.
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output


zigzag = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( ) -> None:  # Main function for testing.
    """Exercise every traversal on the fixture tree and print the results."""
    root = make_tree()
    print(F"""In-order Traversal: {inorder(root )}""" )
    print(F"""Pre-order Traversal: {preorder(root )}""" )
    print(F"""Post-order Traversal: {postorder(root )}""" , '\n' )
    print(F"""Height of Tree: {height(root )}""" , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(root ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(root ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(root ) )


main = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Run the module's doctests, then the demo traversal driver.
    # NOTE(review): `main` must be bound at module level for this to work --
    # confirm after the minification cleanup of the definitions above.
    import doctest
    doctest.testmod()
    main()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( checkpoint_url : str ):
    """Derive a hybrid DPTConfig (and the expected output shape) from a
    checkpoint URL.

    NOTE(review): minification discarded the ``config.<attr>`` writes; the
    attribute names below were restored from the upstream conversion script
    and should be confirmed against a known checkpoint.
    """
    config = DPTConfig(embedding_type='hybrid' )

    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)

    # Fixed: the original `if "nyu" or "midas" in checkpoint_url:` was always
    # True because the non-empty literal "nyu" is truthy on its own.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [2_56, 5_12, 7_68, 7_68]
        config.num_labels = 1_50
        config.patch_size = 16
        expected_shape = (1, 3_84, 3_84)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 1_50
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]

    return config, expected_shape


# Restore the public name `convert_dpt_checkpoint` calls.
get_dpt_config = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( state_dict : Optional[Any] ) -> None:
    """Drop the original classifier-head weights from ``state_dict`` in place.

    Missing keys are ignored (``dict.pop`` with a default), so this is safe to
    call on checkpoints that never had a head.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )


# Restore the public name `convert_dpt_checkpoint` calls.
remove_ignore_keys_ = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( name : List[Any] ) -> Any:
    """Map one key of the original DPT/MiDaS state dict onto the HF DPT scheme.

    The replacements are order-dependent: each ``if`` tests the partially
    renamed key produced by the earlier ones.  Minification had discarded
    every ``name = name.replace(...)`` rebinding (the function returned its
    input unchanged); they are restored here.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone' , 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..' , '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution' , 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer' , 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv' , 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
    return name


# Restore the public name used by the conversion driver.
rename_key = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( A : Dict , A : Tuple ) -> Union[str, Any]:
    """Split each fused qkv projection of the checkpoint into separate
    query/key/value slices.

    NOTE(review): both parameters are named ``A`` (a SyntaxError after
    minification); judging by the body they were ``state_dict`` and
    ``config`` -- confirm.
    NOTE(review): the computed q/k/v slices are bound to throwaway locals
    and never written back into the state dict; the original target-key
    assignments appear to have been lost and must be restored before this
    function does anything useful.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __snake_case : Any = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        __snake_case : Optional[int] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        __snake_case : Any = in_proj_weight[: config.hidden_size, :]
        __snake_case : Dict = in_proj_bias[: config.hidden_size]
        __snake_case : Union[str, Any] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __snake_case : List[str] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __snake_case : Tuple = in_proj_weight[
            -config.hidden_size :, :
        ]
        __snake_case : str = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Download the standard COCO cats test image used to sanity-check the
    converted model's outputs."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im


# Restore the public name `convert_dpt_checkpoint` calls.
prepare_img = _SCREAMING_SNAKE_CASE
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( A : List[str] , A : str , A : int , A : List[Any] , A : List[Any] ) -> List[str]:
    """Convert an original DPT checkpoint to the HF format, optionally
    visualising a prediction, saving to disk and pushing to the hub.

    NOTE(review): all five parameters are named ``A`` (a SyntaxError after
    minification); the call in the __main__ guard passes
    (checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name,
    show_prediction) positionally -- restore those names.
    NOTE(review): most local bindings (``config``, ``state_dict``, ``model``,
    ``image_processor``, ``image``, ``outputs``, ``prediction``) were
    collapsed to throwaway placeholders; the names referenced below show
    what must be rebound before this runs.
    """
    # get_dpt_config returns (config, expected_shape).
    __snake_case ,__snake_case : int = get_dpt_config(A )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    __snake_case : Any = torch.load(A , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(A )
    # rename keys
    for key in state_dict.copy().keys():
        __snake_case : int = state_dict.pop(A )
        __snake_case : List[Any] = val
    # read in qkv matrices
    read_in_q_k_v(A , A )
    # load HuggingFace model
    __snake_case : Optional[Any] = DPTForSemanticSegmentation(A ) if 'ade' in checkpoint_url else DPTForDepthEstimation(A )
    model.load_state_dict(A )
    model.eval()
    # Check outputs on an image
    __snake_case : str = 4_80 if 'ade' in checkpoint_url else 3_84
    __snake_case : Dict = DPTImageProcessor(size=A )
    __snake_case : int = prepare_img()
    __snake_case : Optional[int] = image_processor(A , return_tensors='pt' )
    # forward pass
    __snake_case : List[Any] = model(**A ).logits if 'ade' in checkpoint_url else model(**A ).predicted_depth
    if show_prediction:
        # Upsample to the input image size for display.
        __snake_case : str = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=A , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
    if pytorch_dump_folder_path is not None:
        Path(A ).mkdir(exist_ok=A )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(A )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(A )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    # CLI wiring for the DPT conversion; `parser`/`args` had been collapsed
    # onto one placeholder name by minification.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
        type=str,
        help='''URL of the original DPT checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=False,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
    )
    parser.add_argument(
        '''--model_name''',
        default='''dpt-large''',
        type=str,
        help='''Name of the model, in case you\'re pushing to the hub.''',
    )
    parser.add_argument(
        '''--show_prediction''',
        action='''store_true''',
    )
    args = parser.parse_args()

    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a_ :
    """A small dense vector over int/float components.

    NOTE(review): minification had discarded the ``self.__components``
    writes and collapsed all accessor names onto one; both are restored
    from the call sites visible in this file (``other.component(i)``,
    ``self.euclidean_length()``, ``ans.change_component(...)``).
    """

    def __init__(self , components = None) -> None:
        # Copy the input so the vector owns its storage.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str , self.__components)) + ")"

    def __add__(self , other) -> Vector:
        size = len(self)
        if size == len(other):
            summed = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(summed)
        else:
            raise Exception('must have the same size')

    def __sub__(self , other) -> Vector:
        size = len(self)
        if size == len(other):
            diff = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(diff)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self , other : float) -> Vector:
        ...

    @overload
    def __mul__(self , other : Vector) -> float:
        ...

    def __mul__(self , other) -> float | Vector:
        if isinstance(other , (float, int)):
            # vector-scalar product
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other , a_) and len(self) == len(other):
            # dot product
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self , i) -> float:
        if isinstance(i , int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self , pos , value) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self , other , deg = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


# The module-level helpers below construct vectors via the name `Vector`.
Vector = a_
def _SCREAMING_SNAKE_CASE ( dimension : int ) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension , int )
    return Vector([0] * dimension )


# Restore the public name used by Matrix.__mul__.
zero_vector = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( dimension : int , pos : int ) -> Vector:
    """Return the unit basis vector e_pos in ``dimension`` dimensions.

    NOTE(review): the ``[pos] = 1`` element write was lost in minification
    and is restored here from the function's contract -- confirm.
    """
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    components = [0] * dimension
    components[pos] = 1
    return Vector(components )


unit_basis_vector = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( scalar : float , x : Vector , y : Vector ) -> Vector:
    """Compute ``scalar * x + y`` (the BLAS "axpy" operation).

    NOTE(review): the three parameters were all named ``A`` after
    minification (a SyntaxError); the conventional (scalar, x, y) order is
    restored -- confirm against callers.
    """
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y


axpy = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( n : int , a : int , b : int ) -> Vector:
    """Return a vector of ``n`` random integers drawn from [a, b].

    NOTE(review): the original ``random.seed`` argument was lost in
    minification; seeding with None keeps the draw nondeterministic --
    confirm the intended seeding.
    """
    random.seed(None )
    components = [random.randint(a , b ) for _ in range(n )]
    return Vector(components )


random_vector = _SCREAMING_SNAKE_CASE
class a_ :
    """A simple h x w matrix over int/float entries.

    NOTE(review): minification had discarded the instance-attribute writes
    and collapsed all accessor names onto one; they are restored from the
    call sites inside this class (``other.width()``, ``other.component``,
    ``self.minor``, ``self.cofactor``, ``ans.change_component``).
    """

    def __init__(self , matrix , w , h) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        # Rows rendered as |a,b,...,z| lines.
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self , other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i , j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix , self.__width , self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self , other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i , j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix , self.__width , self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self , other : float) -> Matrix:
        ...

    @overload
    def __mul__(self , other : Vector) -> Vector:
        ...

    def __mul__(self , other) -> Vector | Matrix:
        # The scalar case is checked first; it needs no Vector machinery
        # (behavior for ints/floats/Vectors is unchanged by the reorder).
        if isinstance(other , (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix , self.__width , self.__height)
        if isinstance(other , Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i , sum(summands))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self , x , y) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')

    def change_component(self , x , y , value) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self , x , y) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        # Drop row x, then column y from every remaining row.
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1).determinant()

    def cofactor(self , x , y) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            # Laplace expansion along the first row.
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


# The module-level helpers below construct matrices via the name `Matrix`.
Matrix = a_
def _SCREAMING_SNAKE_CASE ( n : int ) -> Matrix:
    """Return the n x n all-zeros matrix."""
    matrix : list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(matrix , n , n )


square_zero_matrix = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( width : int , height : int , a : int , b : int ) -> Matrix:
    """Return a ``height`` x ``width`` matrix of random ints drawn from [a, b].

    NOTE(review): the four parameters were all named ``A`` after
    minification; the (width, height, a, b) order and the None seed are
    restored from convention -- confirm against callers.
    """
    random.seed(None )
    matrix : list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )


random_matrix = _SCREAMING_SNAKE_CASE
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
# NOTE(review): the mapping below rebinds `__A`, clobbering the logger above;
# minification appears to have collapsed two distinct module-level names (a
# logger and the pretrained-config archive map) onto one -- restore them.
__A = {
    '''facebook/s2t-small-librispeech-asr''': (
        '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a_ ( UpperCamelCase_ ):
    """Configuration class for Speech2Text models.

    NOTE(review): the minified source declared all three class attributes as
    ``_snake_case`` and every ``__init__`` parameter as ``__a`` (a
    SyntaxError), discarding the attribute writes; names are restored from
    the attributes the body and ``super().__init__`` visibly consume.
    """

    model_type = """speech_to_text"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(self , vocab_size=1_0_0_0_0 , encoder_layers=1_2 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6_0_0_0 , max_target_positions=1_0_2_4 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1_0_2_4 , input_feat_per_channel=8_0 , input_channels=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # The conv subsampler needs one kernel size per conv layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""")

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# NOTE(review): every constant below rebinds `__A`, so only the final sha-1
# string survives at runtime; minification appears to have collapsed several
# distinct module-level constants (model id, branch name, commit shas) onto
# one name -- restore the original names.  Each trailing comment describes
# the assignment directly above it.
__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__A = '''main'''
# Default branch name
__A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
__A = '''aaaaaaa'''
# This commit does not exist, so we should 404.
__A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
__A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Context manager that prints 'Welcome!' on entry and 'Bye!' on exit."""
    print('Welcome!' )
    yield
    print('Bye!' )


# The tests below reference this manager as `context_en`; restore that name.
context_en = _SCREAMING_SNAKE_CASE
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """Context manager that prints 'Bonjour!' on entry and 'Au revoir!' on exit."""
    print('Bonjour!' )
    yield
    print('Au revoir!' )


# The tests below reference this manager as `context_fr`; restore that name.
context_fr = _SCREAMING_SNAKE_CASE
class a_ ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Sanity check: the `transformers` package is importable and exposes a spec."""
        module_spec = transformers.__spec__
        assert module_spec is not None
        assert importlib.util.find_spec('transformers') is not None
class a_ ( unittest.TestCase ):
    # NOTE(review): every method below is named SCREAMING_SNAKE_CASE__, so the
    # later definitions shadow the earlier ones and only the last survives on
    # the class -- minification collapsed distinct test names onto one.
    # NOTE(review): several bodies reference names that are no longer bound
    # (`mock_stdout` vs. the `__a` parameter, `context_en`/`context_fr`, and
    # the model classes presumably passed to `find_labels`); restore them
    # before these tests can run.
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self , __a) -> int:
        """ContextManagers([]) should be a no-op around the wrapped print."""
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n')
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]:
        """A single context manager should wrap the output with its messages."""
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n')
    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple:
        """Stacked context managers should nest outer-to-inner."""
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """find_labels should report the label argument names of PT models."""
        self.assertEqual(find_labels(__a) , ['labels'])
        self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions'])
        class a_ ( UpperCamelCase_ ):
            pass
        self.assertEqual(find_labels(__a) , ['labels'])
    @require_tf
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """find_labels should report the label argument names of TF models."""
        self.assertEqual(find_labels(__a) , ['labels'])
        self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions'])
        class a_ ( UpperCamelCase_ ):
            pass
        self.assertEqual(find_labels(__a) , ['labels'])
    @require_flax
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Flax models take no labels, so find_labels should return []."""
        self.assertEqual(find_labels(__a) , [])
        self.assertEqual(find_labels(__a) , [])
        self.assertEqual(find_labels(__a) , [])
        class a_ ( UpperCamelCase_ ):
            pass
        self.assertEqual(find_labels(__a) , [])
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__(self , __a , __a=1_3 , __a=[3_0, 3_0] , __a=2 , __a=3 , __a=True , __a=True , __a=3_2 , __a=5 , __a=4 , __a=3_7 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1_0 , __a=0.02 , __a=3 , __a=None , __a=8 , __a=1_0 , ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = parent
__snake_case : Tuple = batch_size
__snake_case : Optional[int] = image_size
__snake_case : Any = patch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Any = is_training
__snake_case : Dict = use_labels
__snake_case : Tuple = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = type_sequence_label_size
__snake_case : List[Any] = initializer_range
__snake_case : List[str] = num_labels
__snake_case : int = scope
__snake_case : str = n_targets
__snake_case : str = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__snake_case : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__snake_case : List[Any] = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__snake_case : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__snake_case : Union[str, Any] = []
for i in range(self.batch_size):
__snake_case : Dict = {}
__snake_case : Dict = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__a)
__snake_case : Any = torch.rand(self.n_targets , 4 , device=__a)
labels.append(__a)
__snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = YolosModel(config=__a)
model.to(__a)
model.eval()
__snake_case : Dict = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = YolosForObjectDetection(__a)
model.to(__a)
model.eval()
__snake_case : Union[str, Any] = model(pixel_values=__a)
__snake_case : Dict = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__snake_case : List[Any] = model(pixel_values=__a , labels=__a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = self.prepare_config_and_inputs()
__snake_case ,__snake_case ,__snake_case : Dict = config_and_inputs
__snake_case : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_snake_case = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_snake_case = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> str:
"""simple docstring"""
__snake_case : Dict = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__snake_case : List[str] = []
for i in range(self.model_tester.batch_size):
__snake_case : str = {}
__snake_case : List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__a , dtype=torch.long)
__snake_case : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=__a , dtype=torch.float)
labels.append(__a)
__snake_case : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = YolosModelTester(self)
__snake_case : List[str] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=3_7)
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case ,__snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(__a)
__snake_case : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a)
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
__snake_case ,__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = True
# in YOLOS, the seq_len is different
__snake_case : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__snake_case : List[Any] = True
__snake_case : List[str] = False
__snake_case : List[Any] = True
__snake_case : Any = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__a , __a))
__snake_case : Tuple = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : int = True
__snake_case : int = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a))
__snake_case : int = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__snake_case : Optional[int] = len(__a)
# Check attention is always last and order is fine
__snake_case : List[str] = True
__snake_case : Union[str, Any] = True
__snake_case : List[str] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__a , __a))
__snake_case : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(__a))
__snake_case : Optional[int] = outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a):
__snake_case : Optional[int] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a))
__snake_case : Optional[int] = outputs.hidden_states
__snake_case : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__a) , __a)
# YOLOS has a different seq_length
__snake_case : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__snake_case ,__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a)
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__a)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = YolosModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__snake_case : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
__snake_case : int = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(__a)
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : Optional[Any] = image_processor(images=__a , return_tensors='pt').to(__a)
# forward pass
with torch.no_grad():
__snake_case : int = model(inputs.pixel_values)
# verify outputs
__snake_case : Optional[Any] = torch.Size((1, 1_0_0, 9_2))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : Optional[int] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=__a , )
__snake_case : int = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1E-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1E-4))
# verify postprocessing
__snake_case : Optional[int] = image_processor.post_process_object_detection(
__a , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__snake_case : List[str] = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861]).to(__a)
__snake_case : Tuple = [7_5, 7_5, 1_7, 6_3, 1_7]
__snake_case : Any = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495]).to(__a)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , __a , atol=1E-4))
self.assertSequenceEqual(results['labels'].tolist() , __a)
self.assertTrue(torch.allclose(results['boxes'][0, :] , __a)) | 61 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 61 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
__A = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = os.path.dirname(os.path.realpath(A ) )
__snake_case : List[Any] = os.path.join(A , 'words.txt' )
__snake_case : Dict = ''
with open(A ) as f:
__snake_case : List[str] = f.readline()
__snake_case : Optional[Any] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
__snake_case : Dict = [
word
for word in [sum(ord(A ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A )
if __name__ == "__main__":
print(solution()) | 61 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
"""simple docstring"""
__snake_case : str = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _SCREAMING_SNAKE_CASE ( A : int ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = 0
while number > 0:
__snake_case : Dict = number % 10
sum_of_digits += last_digit
__snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int:
"""simple docstring"""
__snake_case : List[Any] = factorial(A )
__snake_case : Dict = split_and_add(A )
return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip()))) | 61 | 1 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__A = '''src/diffusers'''
# Matches is_xxx_available()
__A = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__A = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__A = '''
{0} = None
'''
__A = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__A = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def _SCREAMING_SNAKE_CASE ( A : str ) -> Dict:
"""simple docstring"""
__snake_case : str = _re_backend.findall(A )
if len(A ) == 0:
return None
return "_and_".join(A )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
with open(os.path.join(A , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case : List[str] = f.readlines()
# Get to the point we do the actual imports for type checking
__snake_case : Dict = 0
__snake_case : int = {}
# Go through the end of the file
while line_index < len(A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__snake_case : Tuple = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__snake_case : int = []
# Until we unindent, add backend objects to the list
while line_index < len(A ) and len(lines[line_index] ) > 1:
__snake_case : Dict = lines[line_index]
__snake_case : Optional[Any] = _re_single_line_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(A ) > 0:
__snake_case : List[str] = objects
else:
line_index += 1
return backend_specific_objects
def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Optional[int] ) -> Dict:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(A )
elif name.islower():
return DUMMY_FUNCTION.format(A , A )
else:
return DUMMY_CLASS.format(A , A )
def _SCREAMING_SNAKE_CASE ( A : int=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
__snake_case : Optional[Any] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__snake_case : Optional[int] = {}
for backend, objects in backend_specific_objects.items():
__snake_case : Tuple = '[' + ', '.join(F"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
__snake_case : Union[str, Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(A , A ) for o in objects] )
__snake_case : List[str] = dummy_file
return dummy_files
def _SCREAMING_SNAKE_CASE ( A : List[str]=False ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__snake_case : Union[str, Any] = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__snake_case : List[Any] = os.path.join(A , 'utils' )
__snake_case : int = {
backend: os.path.join(A , F"""dummy_{short_names.get(A , A )}_objects.py""" )
for backend in dummy_files.keys()
}
__snake_case : int = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(A ):
with open(A , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case : List[Any] = f.read()
else:
__snake_case : Tuple = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"""Updating diffusers.utils.dummy_{short_names.get(A , A )}_objects.py as the main """
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F"""diffusers.utils.dummy_{short_names.get(A , A )}_objects.py. Run `make fix-copies` """
'to fix this.' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__A = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 61 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class a_ ( unittest.TestCase ):
def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __a=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __a=True , ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
__snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
__snake_case : Optional[int] = parent
__snake_case : Dict = batch_size
__snake_case : str = num_channels
__snake_case : Optional[Any] = image_size
__snake_case : Optional[int] = min_resolution
__snake_case : Tuple = max_resolution
__snake_case : Optional[int] = do_resize
__snake_case : Optional[int] = size
__snake_case : Union[str, Any] = do_center_crop
__snake_case : List[Any] = crop_size
__snake_case : int = do_normalize
__snake_case : Optional[Any] = image_mean
__snake_case : str = image_std
__snake_case : Optional[Any] = do_convert_rgb
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def SCREAMING_SNAKE_CASE__ (self , __a=False , __a=False , __a=False) -> List[str]:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__snake_case : Optional[int] = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta))
else:
__snake_case : Dict = []
for i in range(self.batch_size):
__snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__snake_case : int = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs]
if torchify:
__snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
_snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a)
@property
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , 'do_resize'))
self.assertTrue(hasattr(__a , 'size'))
self.assertTrue(hasattr(__a , 'do_center_crop'))
self.assertTrue(hasattr(__a , 'center_crop'))
self.assertTrue(hasattr(__a , 'do_normalize'))
self.assertTrue(hasattr(__a , 'image_mean'))
self.assertTrue(hasattr(__a , 'image_std'))
self.assertTrue(hasattr(__a , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4})
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8})
__snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {'shortest_edge': 4_2})
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4})
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a , Image.Image)
# Test not batched input
__snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray)
# Test not batched input
__snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : int = image_processing(__a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor)
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class a_ ( UpperCamelCase_ , unittest.TestCase ):
_snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a)
__snake_case : List[Any] = 3
@property
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , 'do_resize'))
self.assertTrue(hasattr(__a , 'size'))
self.assertTrue(hasattr(__a , 'do_center_crop'))
self.assertTrue(hasattr(__a , 'center_crop'))
self.assertTrue(hasattr(__a , 'do_normalize'))
self.assertTrue(hasattr(__a , 'image_mean'))
self.assertTrue(hasattr(__a , 'image_std'))
self.assertTrue(hasattr(__a , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a , Image.Image)
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 61 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
    """Builds tiny DeiT configs and inputs for the TF model tests.

    NOTE(review): the test class below instantiates ``TFDeiTModelTester``; this
    class appears to be that tester under an anonymised name — confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ) -> None:
        # Fix: the original bound every argument to a throwaway local, so none of
        # the attributes read by the methods below (self.batch_size, ...) existed,
        # and all parameters shared one (duplicate) name.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` for one tiny batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small ``DeiTConfig`` shared by all checks."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class a_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-behaviour tests for the TF DeiT model family.

    Fix: the base classes were an undefined placeholder name; the mixins are
    imported at the top of the file. Class attributes previously clobbered a
    single name, so the mixins could never read them.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): the tester class above is defined under the name ``a_``;
        # ``TFDeiTModelTester`` must resolve to it — confirm the intended name.
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Models whose ``call`` takes no ``labels`` (the teacher variant)
            # must not receive them.
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Any:
    """Load the standard COCO cats fixture image used by the integration test.

    Fix: defined under an obfuscated placeholder name but called as
    ``prepare_img()`` by the integration test below.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
    """Slow integration test: run the distilled DeiT checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        # Fix: the test body reads ``self.default_image_processor``, but the
        # property was defined under an obfuscated name.
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both objects were bound to the same placeholder name, so the second
# assignment clobbered the logger. Give each its conventional name.
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a_ ( PretrainedConfig ):
    """Configuration class for a ViT-MSN model.

    Fix: the base class was an undefined placeholder (``PretrainedConfig`` is
    imported at the top of the file), every parameter shared one duplicate
    name, and the constructor discarded all its arguments instead of storing
    them on ``self``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting every complete board.

    ``possible_board[r]`` holds the column of the queen placed in row ``r``;
    the two collision lists hold occupied ``row - col`` / ``row + col``
    diagonals.  Finished boards are appended to ``boards`` as row strings.

    Fix: the original declared five parameters all named ``A`` (a
    SyntaxError) while the body — and the recursive call — used these names.
    """
    # One queen is already placed per entry, so the next row to fill is len().
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in
    # each row in the current board (possible_board): convert e.g. [1, 3, 0, 2]
    # to ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] and record it.
    if row == n:
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # A reused column is a vertical collision; the two diagonal checks use
        #   45º:  row - col = const        135º: row + col = const
        # against the columns/diagonals already occupied.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Recurse with this queen added and its diagonals marked as occupied.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every board plus a solution count.

    Fix: the module entry point calls ``n_queens_solution(4)``, but the
    function was defined under an obfuscated name, and the body referenced
    locals (``boards``, ``column``) that were never bound.
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')
    print(len(boards), 'solutions were found.')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__snake_case : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) )
return round(A , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 61 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories containing the Flax example scripts; each is put on sys.path so
# the run_* modules below can be imported as plain modules.
# Fix: the list was bound to a placeholder while the next lines read SRC_DIRS.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax

    # NOTE(review): the real example script is named run_t5_mlm_flax; this
    # module name looks mangled — confirm before relying on it.
    import run_ta_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
def get_setup_file():
    """Return the value of the ``-f`` flag (the test file path pytest passes).

    Fix: the body referenced ``parser`` and ``args``, which were bound to a
    single throwaway local in the original.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load the ``{split}_results.json`` file an example run wrote to *output_dir*.

    Raises:
        ValueError: if the results file does not exist.

    Fix: both parameters were declared as ``A`` (a SyntaxError) and the test
    methods below call this function as ``get_results``.
    """
    path = os.path.join(output_dir, f"""{split}_results.json""")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""")
# Mirror log output to stdout so pytest captures it with the test output.
# Fix: the handler was bound to a placeholder while the next line reads
# ``stream_handler``.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a_ ( TestCasePlus ):
    """End-to-end smoke tests that run each Flax example script on tiny fixtures.

    Fix: the base class was an undefined placeholder (``TestCasePlus`` is
    imported above), and every method bound its locals to one throwaway name
    while reading ``tmp_dir``/``testargs``/``result``; ``patch.object`` was
    called with dangling arguments instead of ``(sys, "argv", testargs)``.
    """

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            # NOTE(review): module name looks mangled from run_t5_mlm_flax —
            # confirm (kept as imported at the top of the file).
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to converge
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT sub-package.
# Fix: successive assignments clobbered one placeholder name instead of
# extending the structure, the final lookup read an undefined
# ``_import_structure``, and the ``_LazyModule`` was discarded instead of
# being installed into ``sys.modules``.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make all RNG sources deterministic so the hard-coded expected slices below match.
enable_full_determinism()
class a_ ( unittest.TestCase ):
    """Fast smoke test for the ScoreSdeVe pipeline on a tiny UNet."""

    @property
    def dummy_uncond_unet(self):
        # Fix: the test body reads ``self.dummy_uncond_unet``, but the property
        # was defined under an obfuscated name; locals below were dangling too.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
    """Slow integration test: ScoreSdeVe pipeline on the church-256 checkpoint."""

    def test_inference(self):
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was bound to a placeholder while the next line reads git_repo_path.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Pytest hook: register diffusers' shared CLI options (e.g. --make-reports).

    Fix: pytest discovers conftest hooks strictly by name, so the obfuscated
    placeholder name meant this hook was never invoked.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: emit the detailed run report when ``--make-reports`` is set.

    Fix: the body read ``terminalreporter`` which was not a parameter, and the
    report id argument passed the wrong object; hooks must also carry pytest's
    exact hook name to be discovered.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
    """Tiny synthetic regression dataset: ``y = a*x + b`` plus N(0, 0.1) noise.

    Fix: all four ``__init__`` parameters were declared under one duplicate
    name (a SyntaxError), ``self.x`` was read but never assigned, and
    ``np.floataa`` is not a numpy dtype (``np.float32``).
    """

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
    """Linear model with fixed two-element parameters; logs dtypes on first batch.

    Fix: the three constructor parameters shared one duplicate name (a
    SyntaxError) and ``forward`` — required by ``torch.nn.Module``'s call
    protocol — read an ``x`` that was never a parameter.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        # NOTE(review): a/b are accepted but unused here; the parameters are
        # deliberately fixed to [2, 3] — confirm against the sibling class below.
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
    """Trainable scalar linear model ``y = a*x + b``; logs dtypes on first batch.

    Fix: the constructor parameters shared one duplicate name (a SyntaxError)
    and the forward pass read names that were never bound.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def _SCREAMING_SNAKE_CASE(accelerator, batch_size: int = 16):
    """Build train/eval dataloaders over the MRPC fixture CSVs for accelerate tests.

    Fix: both parameters were declared as ``A`` (a SyntaxError) while the body
    reads ``accelerator``; every intermediate was bound to a throwaway local
    and then read under its intended name.

    NOTE(review): ``batch_size`` is accepted but the loaders use hard-coded
    sizes 2/1 below — confirm whether it should be threaded through.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length'
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the BioGPT sub-package.
# Fix: the torch-backed model list clobbered the base structure instead of
# extending it, and the ``_LazyModule`` result was discarded instead of being
# installed into ``sys.modules``.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
# Fix: these constants were all bound to one placeholder name, while the
# tokenizer class below reads them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
    },
    'merges_file': {
        'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'Salesforce/codegen-350M-mono': (
            'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'Salesforce/codegen-350M-mono': 2048,
}
class a_ ( UpperCamelCase_ ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
_snake_case = CodeGenTokenizer
def __init__(self , __a=None , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , **__a , ) -> Dict:
"""simple docstring"""
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , add_prefix_space=__a , **__a , )
if kwargs.pop('add_bos_token' , __a):
__snake_case : List[Any] = kwargs.pop('name_or_path' , '')
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.')
__snake_case : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , __a) != add_prefix_space:
__snake_case : Dict = getattr(__a , pre_tok_state.pop('type'))
__snake_case : Dict = add_prefix_space
__snake_case : Any = pre_tok_class(**__a)
__snake_case : Any = add_prefix_space
def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> BatchEncoding:
"""simple docstring"""
__snake_case : List[str] = kwargs.get('is_split_into_words' , __a)
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__a , **__a)
def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> BatchEncoding:
"""simple docstring"""
__snake_case : Tuple = kwargs.get('is_split_into_words' , __a)
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__a , **__a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a = None) -> Tuple[str]:
"""simple docstring"""
__snake_case : int = self._tokenizer.model.save(__a , name=__a)
return tuple(__a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a = False , __a = None , __a = None , **__a , ) -> str:
"""simple docstring"""
__snake_case : Dict = super().decode(
token_ids=__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , **__a , )
if truncate_before_pattern is not None and len(__a) > 0:
__snake_case : Tuple = self.truncate(__a , __a)
return decoded_text
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> str:
"""simple docstring"""
def find_re(__a , __a , __a):
__snake_case : List[Any] = pattern.search(__a , __a)
return m.start() if m else -1
__snake_case : Union[str, Any] = [re.compile(__a , re.MULTILINE) for pattern in truncate_before_pattern]
__snake_case : List[str] = list(re.finditer('^print' , __a , re.MULTILINE))
if len(__a) > 1:
__snake_case : Tuple = completion[: prints[1].start()]
__snake_case : Dict = list(re.finditer('^def' , __a , re.MULTILINE))
if len(__a) > 1:
__snake_case : Union[str, Any] = completion[: defs[1].start()]
__snake_case : List[str] = 0
__snake_case : Optional[int] = [
pos for pos in [find_re(__a , __a , __a) for terminal in terminals] if pos != -1
]
if len(__a) > 0:
return completion[: min(__a)]
else:
return completion | 61 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE ( method ) -> int:
    """Decorate `method` so accelerate's CPU-offload hook runs before it.

    Returns `method` unchanged when accelerate is unavailable or older than
    0.17.0 (the version range this hook protocol targets); otherwise returns
    a wrapper that triggers `_hf_hook.pre_forward` before delegating.

    BUG FIXES: the parameter was mangled to `A` while the body returns
    `method` (NameError), and the wrapper declared `*A, **A` — a duplicate
    argument name, which is a SyntaxError.
    """
    if not is_accelerate_available():
        return method
    # Compare against the base version so pre-release suffixes don't matter.
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        # Re-load offloaded weights onto the execution device before the call.
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# BUG FIX: all three constants were mangled to `__A`; `TRANSFORMERS_PATH` is
# the name read by `direct_transformers_import(...)` below, and the doc/repo
# paths are restored to their conventional names.
TRANSFORMERS_PATH = 'src/transformers'  # where the library source lives
PATH_TO_DOCS = 'docs/source/en'         # docs tree containing index.md
REPO_PATH = '.'                         # repository root
def _SCREAMING_SNAKE_CASE ( A : Any , A : str , A : int ) -> Tuple:
"""simple docstring"""
with open(A , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case : Union[str, Any] = f.readlines()
# Find the start prompt.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(A ):
start_index += 1
start_index += 1
__snake_case : Tuple = start_index
while not lines[end_index].startswith(A ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
# BUG FIX: all five module-level names below were mangled to `__A`; the regex
# and module names are restored to the identifiers read by the functions
# further down (`_re_tf_models`, `_re_flax_models`, `_re_pt_models`,
# `transformers_module`).
_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words.

    Restores the original name: `get_model_table_from_auto_modules` calls
    `camel_case_split`, which the obfuscated shared name broke.
    """
    # A word ends at a lower->Upper boundary, an UPPERUpper-lower boundary,
    # or at the end of the string.
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def _SCREAMING_SNAKE_CASE ( A : Tuple , A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = 2 if text == '✅' or text == '❌' else len(A )
__snake_case : str = (width - text_length) // 2
__snake_case : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the markdown model table (slow/fast tokenizer and PT/TF/Flax
    support per model) from the transformers auto-module metadata.

    BUG FIXES: restores the original name (the caller invokes
    `get_model_table_from_auto_modules`), and replaces
    `collections.defaultdict(A)` — `A` was undefined in this zero-argument
    function — with `defaultdict(bool)` so unseen prefixes default to False.
    """
    # Model name (as shown in the docs) -> config class name.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            # Strip trailing camel-case words until a known model prefix remains.
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column must fit the longest model name.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Header row, then a ":-----:" separator row that center-aligns cells.
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Verify the auto-generated model table in `index.md` is current.

    Rewrites the file in place when `overwrite` is True; otherwise raises
    ValueError so CI fails with a `make fix-copies` hint.

    BUG FIX: restores the original name (called from `__main__`) and the
    docs path — the mangled code built the path as `os.path.join(A, ...)`
    with `A` being the boolean overwrite flag. Assumes a module-level
    `PATH_TO_DOCS` constant pointing at the docs tree.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'),
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->',
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            # Splice the regenerated table between the two prompt markers.
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.')
if __name__ == "__main__":
    # CLI: pass --fix_and_overwrite to rewrite the table instead of erroring.
    # BUG FIX: both `__A` bindings were mangled — the following lines read
    # `parser` and `args`, so those names are restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , UpperCamelCase_ ):
    """Tests for the 'text-to-speech' agent tool (plus the shared ToolTesterMixin checks).

    NOTE(review): the `__snake_case : ... = ...` targets below look
    machine-mangled — e.g. the first line of the setup method presumably read
    `self.tool = load_tool(...)`; as written, `self.tool` is read but never
    assigned. Also, all three methods share one mangled name, so the earlier
    definitions are shadowed by the last. Confirm against the original module.
    """

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Load the text-to-speech tool and run its one-time setup."""
        __snake_case : List[str] = load_tool('text-to-speech')
        self.tool.setup()

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """With a fixed torch seed, the waveform for 'hey' matches reference values."""
        torch.manual_seed(0)
        __snake_case : Dict = self.tool('hey')
        __snake_case : List[Any] = result.to_raw()
        # Only the first three samples are pinned; full-waveform comparison
        # would be brittle across backends.
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Same check as above — verifies determinism across separate invocations."""
        torch.manual_seed(0)
        __snake_case : Any = self.tool('hey')
        __snake_case : Any = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( UpperCamelCase_ ):
    """Donut-style processor pairing an image processor with a tokenizer, and
    converting generated token sequences back into JSON (`tokenajson`).

    NOTE(review): the duplicated `__a` parameters and `__snake_case`
    assignment targets throughout this class look machine-mangled (e.g. the
    first `__init__` line presumably read `self.image_processor = ...`).
    Code is left byte-identical; comments/docstrings only.
    """

    _snake_case = ["""image_processor""", """tokenizer"""]
    _snake_case = """AutoImageProcessor"""
    _snake_case = """AutoTokenizer"""

    def __init__(self , __a=None , __a=None , **__a) -> Any:
        """Accept image_processor/tokenizer; `feature_extractor` is a deprecated alias."""
        __snake_case : str = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , __a , )
            __snake_case : Any = kwargs.pop('feature_extractor')
        # Fall back to the deprecated alias when no image processor was given.
        __snake_case : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(__a , __a)
        __snake_case : List[str] = self.image_processor
        __snake_case : int = False

    def __call__(self , *__a , **__a) -> List[Any]:
        """Dispatch to the image processor and/or tokenizer depending on inputs.

        Inside `as_target_processor` everything goes to the current (tokenizer)
        processor; otherwise images and/or text must be supplied, and when both
        are given the tokenized ids are attached to the image inputs as labels.
        """
        if self._in_target_context_manager:
            return self.current_processor(*__a , **__a)
        __snake_case : Tuple = kwargs.pop('images' , __a)
        __snake_case : str = kwargs.pop('text' , __a)
        # Positional form: first arg is images, remaining args pass through.
        if len(__a) > 0:
            __snake_case : Union[str, Any] = args[0]
            __snake_case : Optional[Any] = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            __snake_case : int = self.image_processor(__a , *__a , **__a)
        if text is not None:
            __snake_case : Tuple = self.tokenizer(__a , **__a)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both provided: merge tokenized ids into the image batch.
            __snake_case : List[Any] = encodings['input_ids']
            return inputs

    def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> Tuple:
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , *__a , **__a) -> int:
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*__a , **__a)

    @contextmanager
    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Deprecated context manager that temporarily makes the tokenizer the
        current processor for target/label preparation."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.')
        __snake_case : List[str] = True
        __snake_case : Optional[Any] = self.tokenizer
        yield
        # Restore the image processor once the with-block exits.
        __snake_case : Dict = self.image_processor
        __snake_case : List[Any] = False

    def SCREAMING_SNAKE_CASE__ (self , __a , __a=False , __a=None) -> Optional[Any]:
        """Convert a generated token string with `<s_key>...</s_key>` markup
        into nested JSON; `<sep/>` separates sibling values.

        Recurses on `<s_...>` content for non-leaf nodes; leaf values that are
        added-vocabulary tokens of the form `<x/>` are unwrapped to `x`.
        """
        if added_vocab is None:
            __snake_case : int = self.tokenizer.get_added_vocab()
        __snake_case : Any = {}
        while tokens:
            # Find the next opening tag; stop when none remain.
            __snake_case : Any = re.search(R'<s_(.*?)>' , __a , re.IGNORECASE)
            if start_token is None:
                break
            __snake_case : int = start_token.group(1)
            __snake_case : Tuple = re.search(RF"""</s_{key}>""" , __a , re.IGNORECASE)
            __snake_case : Any = start_token.group()
            if end_token is None:
                # Unbalanced tag: drop it and continue scanning.
                __snake_case : int = tokens.replace(__a , '')
            else:
                __snake_case : str = end_token.group()
                __snake_case : Optional[int] = re.escape(__a)
                __snake_case : Tuple = re.escape(__a)
                # Everything between the opening and closing tag.
                __snake_case : Optional[int] = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , __a , re.IGNORECASE)
                if content is not None:
                    __snake_case : Union[str, Any] = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content: # non-leaf node
                        __snake_case : Union[str, Any] = self.tokenajson(__a , is_inner_value=__a , added_vocab=__a)
                        if value:
                            if len(__a) == 1:
                                # Single child: unwrap the one-element list.
                                __snake_case : Optional[Any] = value[0]
                            __snake_case : Union[str, Any] = value
                    else: # leaf nodes
                        __snake_case : Any = []
                        for leaf in content.split(R'<sep/>'):
                            __snake_case : List[Any] = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                __snake_case : Optional[Any] = leaf[1:-2] # for categorical special tokens
                            output[key].append(__a)
                        if len(output[key]) == 1:
                            __snake_case : Optional[Any] = output[key][0]
                # Advance past the closing tag and recurse on siblings.
                __snake_case : str = tokens[tokens.find(__a) + len(__a) :].strip()
                if tokens[:6] == r"<sep/>": # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=__a , added_vocab=__a)
        if len(__a):
            return [output] if is_inner_value else output
        else:
            # No structure parsed: return the raw text (or [] when nested).
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __a , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __a , )
        return self.image_processor
'''simple docstring'''
import math
class Graph:
    """Dense weighted digraph supporting all-pairs shortest paths via
    Floyd-Warshall.

    BUG FIXES: the driver below calls `Graph`, `add_edge`, `floyd_warshall`
    and `show_min`, none of which existed under the mangled names; the
    `self.*` assignment targets are restored from usage (`floyd_warshall`
    reads `self.n`/`self.dp`), and `dp[i][i]` is zeroed so a node's distance
    to itself is 0 rather than inf.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        for i in range(0, n):
            self.dp[i][i] = 0  # zero-length path from every node to itself

    def add_edge(self, u, v, w):
        """Add (or overwrite) a directed edge u -> v of weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k: O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest distance u -> v (run `floyd_warshall` first)."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Demo: 5-node weighted digraph; run Floyd-Warshall, then query two
    # shortest distances.
    # BUG FIX: the mangled `__A = Graph(5)` binding is restored to `graph`,
    # the name every following line reads.
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__A = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__A = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__A = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    BUG FIXES: both methods were mangled to one shared name (the second
    shadowed the first) that `datasets.Metric` never dispatches to — the
    framework calls `_info` and forwards `compute(...)` kwargs to `_compute`,
    so those hook names are restored. `_compute` also declared six identical
    `__a` parameters (a SyntaxError); the names are restored from the
    `_KWARGS_DESCRIPTION` documented kwargs.
    """

    def _info(self):
        """Return metric metadata; also guards against too-old sacrebleu."""
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        """Compute TER; returns {'score', 'num_edits', 'ref_length'}."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # Transpose: sacrebleu wants one stream per reference set, not one
        # list of references per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class a_ ( UpperCamelCase_ ):
    """ConvNeXT-style image processor: shortest-edge resize (with a crop_pct
    center-crop trick below 384px), optional rescale and normalization.

    NOTE(review): the duplicated `__a` parameters and `__snake_case`
    assignment targets below look machine-mangled (e.g. `self.do_resize =
    do_resize`); code is left byte-identical, comments/docstrings only.
    """

    _snake_case = ["""pixel_values"""]

    def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None:
        """Store default preprocessing configuration (size defaults to a 384 shortest edge)."""
        super().__init__(**__a)
        __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4}
        __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a)
        __snake_case : int = do_resize
        __snake_case : List[str] = size
        # Default value set here for backwards compatibility where the value in config is None
        __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        __snake_case : Tuple = resample
        __snake_case : Dict = do_rescale
        __snake_case : Any = rescale_factor
        __snake_case : str = do_normalize
        __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray:
        """Resize: below a 384 shortest edge, resize to shortest_edge/crop_pct
        and center-crop to a square; at 384 or above, warp directly to a square."""
        __snake_case : Dict = get_size_dict(__a , default_to_square=__a)
        if "shortest_edge" not in size:
            raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        __snake_case : List[str] = size['shortest_edge']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            __snake_case : Any = int(shortest_edge / crop_pct)
            __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a)
            __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(__a , scale=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
        """Normalize per channel with the given mean/std."""
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image:
        """Full pipeline: validate -> resize -> rescale -> normalize -> channel
        format, returned as a BatchFeature of 'pixel_values'."""
        # Per-call arguments override the instance defaults.
        __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize
        __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct
        __snake_case : Tuple = resample if resample is not None else self.resample
        __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale
        __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean
        __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std
        __snake_case : List[str] = size if size is not None else self.size
        __snake_case : Any = get_size_dict(__a , default_to_square=__a)
        __snake_case : Dict = make_list_of_images(__a)
        if not valid_images(__a):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # NOTE(review): `and` binds tighter than `or`, so this raises whenever
        # `resample is None` even with do_resize=False — presumably intended as
        # `do_resize and (size is None or resample is None)`; confirm upstream.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        __snake_case : Tuple = [to_numpy_array(__a) for image in images]
        if do_resize:
            __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images]
        if do_rescale:
            __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images]
        if do_normalize:
            __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images]
        __snake_case : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=__a , tensor_type=__a)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class a_ ( UpperCamelCase_ ):
_snake_case = ["""pixel_values"""]
def __init__(self , __a = True , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = None , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None:
"""simple docstring"""
super().__init__(**__a)
__snake_case : Optional[int] = size if size is not None else {'shortest_edge': 2_5_6}
__snake_case : int = get_size_dict(__a , default_to_square=__a)
__snake_case : Any = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__snake_case : Tuple = get_size_dict(__a)
__snake_case : Dict = do_resize
__snake_case : Any = size
__snake_case : Optional[int] = resample
__snake_case : int = do_center_crop
__snake_case : int = crop_size
__snake_case : int = do_rescale
__snake_case : Optional[int] = rescale_factor
__snake_case : Optional[int] = do_normalize
__snake_case : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
__snake_case : str = get_size_dict(__a , default_to_square=__a)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
__snake_case : List[str] = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a)
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
__snake_case : Dict = get_size_dict(__a)
return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a) -> np.ndarray:
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def preprocess(
    self,
    images,
    do_resize=None,
    size=None,
    resample=None,
    do_center_crop=None,
    crop_size=None,
    do_rescale=None,
    rescale_factor=None,
    do_normalize=None,
    image_mean=None,
    image_std=None,
    return_tensors=None,
    data_format=ChannelDimension.FIRST,
    **kwargs,
):
    """Preprocess one image or a batch: resize -> center-crop -> rescale -> normalize.

    Every flag/value defaults to the instance attribute set in `__init__`; pass an
    explicit argument to override per call.

    Args:
        images: A PIL image, numpy array, tensor, or a list of them.
        return_tensors: Framework of the returned `pixel_values` (e.g. "pt", "np").
        data_format: Output channel ordering (channels-first by default).

    Returns:
        A `BatchFeature` with key `pixel_values`.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
            'torch.Tensor, tf.Tensor or jax.ndarray.')
    # Validate that every enabled step has the parameters it needs.
    if do_resize and size is None:
        raise ValueError('Size must be specified if do_resize is True.')
    if do_center_crop and crop_size is None:
        raise ValueError('Crop size must be specified if do_center_crop is True.')
    if do_rescale and rescale_factor is None:
        raise ValueError('Rescale factor must be specified if do_rescale is True.')
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('Image mean and std must be specified if do_normalize is True.')
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {'pixel_values': images}
    return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! for a non-negative integer `num`.

    Raises:
        ValueError: if `num` is negative.
    """
    if num < 0:
        raise ValueError('Number should not be negative.')
    # Memoized recursion: lru_cache makes repeated calls O(1) lookups.
    return 1 if num in (0, 1) else num * factorial(num - 1)
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both batch-size constants were mangled to the same name `__A`
# (in the upstream accelerate example these are MAX_GPU_BATCH_SIZE = 16 and
# EVAL_BATCH_SIZE = 32); the second assignment overwrites the first — confirm.
__A = 1_6
__A = 3_2
def get_dataloaders(accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with a BERT tokenizer.

    Args:
        accelerator: `Accelerator` coordinating the (possibly distributed) processes.
        batch_size: per-device batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2']
        )
    # Rename the 'label' column to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # NOTE(review): the upstream example used a separate EVAL_BATCH_SIZE (32); that module
    # constant was mangled in this file, so the train batch size is reused here — confirm.
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Swap the real dataloader factory for the mocked one under test runs.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on GLUE/MRPC using Accelerate with LocalSGD.

    Args:
        config: dict of hyper-parameters ("lr", "num_epochs", "seed", "batch_size").
        args: parsed CLI namespace (cpu, mixed_precision, gradient_accumulation_steps,
            local_sgd_steps).
    """
    # For testing only: shrink the run when the mocked-dataloader env flag is set.
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weight init)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # Moving the model before creating the optimizer is required for TPU training,
    # even though `device_placement=True` would otherwise handle placement.
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )
    # Prepare everything; unpack in the same order the objects were passed in.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # `accumulate` performs gradient accumulation transparently.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line: sync parameters every `local_sgd_steps` batches.
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI flags and launch `training_function`."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='The number of minibatches to be ran before gradients are accumulated.',
    )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD'
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Script entry point (only runs when executed directly, not on import).
if __name__ == "__main__":
    main()
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make torch/backend kernels deterministic so the expected tensors below are stable.
enable_full_determinism()
class a_(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Unit tests for `VQModel` (the degraded file named every method identically,
    so only the last one survived; names restored to the standard diffusers test
    suite — confirm against upstream)."""

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A single random batch shaped (4, 3, *sizes) on the test device."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Minimal VQModel config + matching dummy inputs for the shared mixin tests."""
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable for VQModel; overridden to skip the mixin check.
        pass

    def test_training(self):
        # Not applicable for VQModel; overridden to skip the mixin check.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained('fusing/vqgan-dummy')
        model.to(torch_device).eval()
        # Seed both CPU and CUDA RNGs so the expected slice below is reproducible.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
'''simple docstring'''
from __future__ import annotations
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None) -> None:
        self.data = data  # payload stored in this node
        self.next = None  # next node in the chain, or None at the tail

    def __repr__(self) -> str:
        """Render the chain starting at this node as 'a->b->c'."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list):
    """Build a linked list from `elements_list` and return its head `Node`.

    Raises:
        Exception: if `elements_list` is empty.
    """
    if not elements_list:
        raise Exception('The Elements List is empty')
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node) -> None:
    """Print the list's elements from tail to head using recursion."""
    # Recurse to the tail first, then print on the way back up.
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    """Demo: build a list, print it forward, then print it in reverse."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)
# Script entry point (only runs when executed directly, not on import).
if __name__ == "__main__":
    main()
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Field names restored from their usage as `model_args.*` in `main` below
    (the degraded file collapsed them all to one name).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: Optional[bool] = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data we train and evaluate on.

    Field names restored from their usage as `data_args.*` in `main` below.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tune a token-classification model (NER/POS) on CoNLL-formatted data.

    Returns:
        dict of evaluation results (empty when --do_eval is not passed).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.'
        )

    # Dynamically load the task class named by --task_type from the local `tasks` module.
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer.
    # Distributed training: the .from_pretrained methods guarantee that only one
    # local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        """Map model logits + gold ids to per-sentence label-string lists, skipping ignored positions."""
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            # fa_score is seqeval's f1_score (the import name was mangled in this file).
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator: dynamic padding; pad to a multiple of 8 for fp16 tensor cores.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index: int) -> None:
    """Entry point for xla_spawn (TPUs); `index` is the spawned process ordinal."""
    # For xla_spawn (TPUs)
    main()
# Script entry point (only runs when executed directly, not on import).
if __name__ == "__main__":
    main()
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class a_(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into one processor.

    Audio inputs are routed to the feature extractor and text inputs to the
    tokenizer; when both are given, the tokenized text is attached as `labels`.
    """

    # ProcessorMixin instantiates the sub-processors from these class names.
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's decoder-prompt helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process `audio` and/or `text` inputs.

        Raises:
            ValueError: if neither `audio` nor `text` is provided.
        """
        # Inside `as_target_processor`-style contexts, defer to the active sub-processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            # Backward compatibility: first positional argument is the audio.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # NOTE(review): restored from the upstream WhisperProcessor — the degraded
            # file dropped the `labels` assignment; confirm against transformers.
            inputs["labels"] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Forward to the tokenizer's `get_prompt_ids`."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place with odd-even (brick) sort and return it.

    Alternates passes over even-indexed and odd-indexed neighbour pairs until a
    full sweep performs no swap.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print('Enter list to be sorted')
    # inputing elements of the list in one line
    input_list = [int(x) for x in input().split()]
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a_(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline
    (method/variable names restored from the upstream diffusers test — confirm)."""

    def tearDown(self):
        # Drop Python-side references so accelerator memory can be reclaimed between tests.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        # Replicate params and shard inputs across all available devices for pmap.
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png'
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Convert one timm LeViT checkpoint into a HF model and optionally save it.

    Args:
        hidden_sizes: first-stage hidden size, used to select the timm variant.
        name: HF checkpoint name (e.g. "levit-128S"); a trailing "S" selects levit_128s.
        config: `LevitConfig` for the target model.
        save_directory: directory under which the converted checkpoint is written.
        push_to_hub: when True, save model + image processor under `save_directory/name`.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        # Pick the matching pretrained timm model for this configuration.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        # Map weights by position: both state dicts enumerate parameters in the same order.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        # Sanity-check the conversion on a random batch.
        x = torch.randn((2, 3, 224, 224))
        original_logits = from_model(x)
        our_logits = our_model(x).logits
        assert torch.allclose(original_logits, our_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """
    Convert one (or all) LeViT checkpoints and save them under ``save_directory``.

    Args:
        save_directory: Target directory for the converted checkpoints.
        model_name: Single checkpoint name to convert; all known LeViT
            checkpoints are converted when None.
        push_to_hub: Forwarded to :func:`convert_weight_and_push`.

    Returns:
        Tuple ``(config, expected_shape)`` where ``config`` is the (last)
        converted model's configuration and ``expected_shape`` the logits shape.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    # ImageNet-1k label mapping, fetched from the shared label-files dataset.
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-filled with the classification head metadata.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        # Bind config here as well so the return value is defined on this branch.
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )

    args = parser.parse_args()
    # Ensure the dump directory exists before conversion writes into it.
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """
    Return the multiplication table of ``number`` with ``number_of_terms``
    rows, one "number * i = product" line per term.

    Args:
        number: The number whose table is built.
        number_of_terms: How many terms (rows) to generate; 0 yields "".

    Returns:
        The table as a single newline-joined string.
    """
    return "\n".join(
        F"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    """Builds small EfficientFormer configs/inputs and common checks for the test suite."""

    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        # Sequence length is the embedding dim plus the distillation/cls token.
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random test tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build an EfficientFormerConfig from the tester's hyperparameters."""
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits for RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF EfficientFormer models."""

    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    # EfficientFormer does not support these common-suite features.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, 'encoder_seq_length'):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, 'chunk_length') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, 'seq_length', None)
                decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # The teacher head takes no labels, so drop them for that class.
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)

        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released l1-300 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # None when vision dependencies (PIL) are unavailable.
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the GLPN package: the heavy submodules are only
# imported when one of their attributes is first accessed.
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Fifth power of each decimal digit, keyed by the digit's character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """
    Project Euler 30: sum of all numbers equal to the sum of the fifth powers
    of their digits.  6 * 9**5 = 354294 bounds the search, so scanning up to
    1_000_000 suffices; the scan starts at 1000 (trivial sums like 1 = 1**5
    are excluded by the problem statement).
    """
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.