| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
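A quick usage sketch (not part of the original file; the concentration values are illustrative, and the printed result is approximate):

# V_bi = (kT/q) * ln(N_d * N_a / n_i**2); for these assumed inputs the result is roughly 0.83 V at T = 300 K
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))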
| code_codestyle: 253 |
"""simple docstring"""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
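An illustrative invocation; only the argument names follow the fields the script actually reads (tokenizer_name, config_name, model_name, push_to_hub), while the script filename and the values are assumptions:

# python initialize_model.py --tokenizer_name <tokenizer-repo> --config_name gpt2-large --model_name <output-repo> --push_to_hub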
| style_context_codestyle: 286 | label: 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of a MarkupLM model."""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
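As a quick sanity check (illustrative, not part of the original file), the config can be instantiated with its defaults:

config = MarkupLMConfig()
print(config.model_type, config.hidden_size, config.max_depth)  # markuplm 768 50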
| code_codestyle: 43 |
'''simple docstring'''
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| style_context_codestyle: 43 | label: 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| code_codestyle: 74 |
"""simple docstring"""
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights to our RoBERTa-PreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
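An illustrative invocation; the checkpoint repo comes from the script's own help text, while the script filename is an assumption:

# python convert_roberta_prelayernorm_checkpoint.py --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 --pytorch_dump_folder_path ./dump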
| style_context_codestyle: 74 | label: 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but also returning the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
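A usage sketch mirroring the docstring examples above (the expected value is the one stated there):

import datasets
metric = datasets.load_metric("pearsonr")
print(round(metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])["pearsonr"], 2))  # -0.74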
| code_codestyle: 350 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests without an explicit "integration" or "unit" marker as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all datasets caches to a temporary directory for the test session
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting download stats
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| style_context_codestyle: 72 | label: 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
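An illustrative invocation using the argparse defaults defined above; the script filename is an assumption:

# python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path ./vit-hybrid --push_to_hub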
| code_codestyle: 162 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to data_dir/{type_path}.len."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
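Because the entry point is exposed through fire.Fire, it can also be called directly; the tokenizer and data directory below are illustrative assumptions:

save_len_file("facebook/bart-large", "./cnn_dm", consider_target=True)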
| style_context_codestyle: 63 | label: 0 |
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Memoized recursive factorial.

    >>> factorial(7)
    5040
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
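A small usage sketch (illustrative): thanks to lru_cache, a second call reuses previously computed frames instead of recursing from scratch.

print(factorial(5))  # 120
print(factorial(6))  # 720, reuses the cached factorial(5)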
| code_codestyle: 39 |
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum allocated resources per resource column of the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources = claim vector - currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process need = maximum claim - currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's need vector back to its original index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm on the stored tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align and display the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
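A short driver (illustrative) exercising the class on the sample tables defined above; passing describe=True also prints the tables:

BankersAlgorithm(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)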
| style_context_codestyle: 39 | label: 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
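A usage sketch (illustrative); the checkpoint name is one of the pretrained entries mapped above:

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
print(tokenizer("Hello world")["input_ids"])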
| code_codestyle: 9 |
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| style_context_codestyle: 9 | label: 1 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(UpperCamelCase__ , scheduler.state_dict() )
def A ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase = accelerator.prepare(UpperCamelCase__ )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints should survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = "/tmp/accelerate/state_checkpointing"
_lowerCamelCase : Any = DummyModel()
_lowerCamelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowerCamelCase : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase ,_lowerCamelCase : Optional[Any] = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase : Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase ,_lowerCamelCase : List[str] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase : Any = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
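

# Editor's note: the sketch below is not part of the original test script. It is a
# minimal illustration of the save/load round trip the tests above exercise; the
# helper name `_demo_state_roundtrip` is hypothetical and is never called here.
def _demo_state_roundtrip(tmpdir: str) -> None:
    demo_model = DummyModel()
    demo_optimizer = torch.optim.Adam(params=demo_model.parameters(), lr=1e-3)
    demo_accelerator = Accelerator(project_dir=tmpdir)
    demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
    # save_state() writes model weights, optimizer state and RNG states ...
    demo_accelerator.save_state(os.path.join(tmpdir, "demo"))
    # ... and load_state() restores them, so training can resume deterministically
    demo_accelerator.load_state(os.path.join(tmpdir, "demo"))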
| 249
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Convert a PyTorch BertModel to a TensorFlow 1.x checkpoint. Weight names are
    rewritten with the (pattern, replacement) pairs in `var_map`, and the tensors
    listed in `tensors_to_transpose` are transposed on the way out.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name: str, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
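
# Editor's note (not in the original script): a worked example of the mapping done by
# `to_tf_var_name` above. Applying the `var_map` pairs in order, the PyTorch key
#     "encoder.layer.0.attention.self.query.weight"
# becomes
#     "bert/encoder/layer_0/attention/self/query/kernel"
# ("layer." -> "layer_", "." -> "/", "weight" -> "kernel"), and because
# "attention.self.query" appears in `tensors_to_transpose`, the tensor itself is
# transposed before being written to the TF checkpoint.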
| 249
| 1
|
"""A toy one-time-pad-style cipher driven by pseudo-random keys (not cryptographically secure)."""
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """For each character p, draw a random key k and store c = (p + k) * k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt via p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
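

# Editor's note: a quick property check, not part of the original file. It verifies
# the algebra in `decrypt`: c = (p + k) * k = p*k + k**2, hence p = (c - k**2) / k.
def _demo_onepad_roundtrip(text: str = "attack at dawn") -> bool:
    cipher, key = Onepad.encrypt(text)
    return Onepad.decrypt(cipher, key) == text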
| 304
|
"""Top-down (memoized) computation of the edit distance between two words."""
import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word's index overflows - delete all remaining characters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining characters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
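

# Editor's note: an alternative bottom-up sketch of the same recurrence, added for
# illustration only (not in the original file). dp[i][j] holds the edit distance
# between the suffixes worda[i:] and wordb[j:].
def _min_distance_bottom_up(worda: str, wordb: str) -> int:
    m, n = len(worda), len(wordb)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][n] = m - i  # only deletions from worda remain
    for j in range(n + 1):
        dp[m][j] = n - j  # only deletions from wordb remain
    for i in range(m - 1, -1, -1):
        for j in range(n - 1, -1, -1):
            diff = int(worda[i] != wordb[j])
            dp[i][j] = min(1 + dp[i + 1][j], 1 + dp[i][j + 1], diff + dp[i + 1][j + 1])
    return dp[0][0]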
| 304
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
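

# Editor's note: a self-contained illustration (not in the original script) of the
# slicing done by `read_in_k_v`: the checkpoint stores keys and values as one fused
# (2*hidden, hidden) matrix, which is split row-wise into separate projections.
def _demo_kv_split(hidden_size: int = 4) -> None:
    kv_weight = torch.arange(2 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        2 * hidden_size, hidden_size
    )
    key_weight = kv_weight[:hidden_size, :]  # first half of the rows -> key projection
    value_weight = kv_weight[hidden_size:, :]  # second half of the rows -> value projection
    assert torch.equal(torch.cat([key_weight, value_weight], dim=0), kv_weight)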
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 362
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Fetch the artifacts and return their decoded file contents keyed by artifact name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
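

# Editor's note: a hypothetical usage sketch, not part of the original utility. The
# artifact name and output directory below are placeholders, not real values.
def _demo_fetch_reports() -> None:
    reports = get_last_daily_ci_reports(
        artifact_names=["run_all_tests_gpu_test_reports"],  # placeholder artifact name
        output_dir="/tmp/daily_ci",
        token=os.environ.get("GITHUB_TOKEN"),  # None also works, with lower rate limits
    )
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))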
| 202
| 0
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
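

# Editor's note: a generic sketch (not from the original test file) of the property
# the `past_key_values` test above checks: decoding the last token with a cache must
# give the same logits as slicing a full forward pass. `model` is any causal LM.
def _demo_cache_equivalence(model, input_ids):
    full_logits = model(input_ids).logits
    prefix = model(input_ids[:, :-1], use_cache=True)
    step_logits = model(input_ids[:, -1:], past_key_values=prefix.past_key_values).logits
    return torch.allclose(full_logits[:, -1:], step_logits, atol=1e-3)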
| 303
|
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number under `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
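

# Editor's note: a worked example, not part of the original solution. Starting at 13
# the chain is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (10 terms); the
# `counters` dict above memoizes such lengths so later starting points reuse them.
def _collatz_chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length


assert _collatz_chain_length(13) == 10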
| 303
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
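

# Editor's note: a small illustration (not in the original file) of `attribute_map`
# above: reads of the generic names are redirected to the DETR-specific attributes.
def _demo_attribute_map() -> None:
    config = DetrConfig(d_model=128, encoder_attention_heads=4)
    assert config.hidden_size == 128  # alias for config.d_model
    assert config.num_attention_heads == 4  # alias for config.encoder_attention_heads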
| 362
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
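

# Editor's note: a standalone numeric sketch (not part of the pipeline) of the
# classifier-free guidance combination used in `__call__` above:
#     eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond)
# With guidance_scale == 1 this reduces to the conditional prediction alone.
def _demo_cfg(guidance_scale: float = 4.0) -> torch.Tensor:
    cond_eps = torch.tensor([1.0, 2.0])
    uncond_eps = torch.tensor([0.5, 1.0])
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)  # -> tensor([2.5, 5.0])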
| 150
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 33
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}

    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (list, tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Override common test: MaskFormerSwin emits hidden states as
    # (batch_size, height * width, num_channels) instead of (batch_size, num_channels, height, width)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
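
# A minimal standalone sketch (not part of the test suite above) of the shape
# arithmetic `create_and_check_model` asserts: each of the len(depths) - 1
# patch-merging stages shrinks the token count 4x and doubles the embed dim.
# The numbers mirror the tester defaults above.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 256 tokens after patch embedding
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 16 tokens after two merges
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))      # 64 channels
assert (expected_seq_len, expected_dim) == (16, 64)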
| 11
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
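
# Note on the pattern above (illustrative comment, not part of the module):
# `_LazyModule` replaces this package in `sys.modules`, so the heavy submodules in
# `_import_structure` are only imported the first time an attribute is accessed, e.g.:
#
#   from transformers.models.megatron_bert import MegatronBertModel  # triggers the real import here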
| 200
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
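
# The try/except above is the standard graceful-degradation guard: when the pinned
# requirements are missing, dummy objects that raise a helpful error at call time are
# exported instead. A minimal sketch of the same idea for a hypothetical dependency
# (the name is illustrative, not a real package):
try:
    import hypothetical_backend
except ImportError:
    hypothetical_backend = None


def _require_backend():
    if hypothetical_backend is None:
        raise ImportError("Install `hypothetical_backend` to use this feature.")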
| 200
| 1
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Returns a list of valid successor nodes around the parent cell."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node) -> Path:
        """Walks parent pointers back to the start and returns the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # retarget each frontier at the other frontier's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase : Tuple = (0, 0)
lowercase : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase : Dict = time.time()
lowercase : Optional[int] = BreadthFirstSearch(init, goal)
lowercase : Dict = bfs.search()
lowercase : Dict = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
lowercase : List[str] = time.time()
lowercase : int = BidirectionalBreadthFirstSearch(init, goal)
lowercase : Any = bd_bfs.search()
lowercase : Dict = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 20
|
"""simple docstring"""
def __lowerCAmelCase ( lowercase : List[str] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Tuple , lowercase : List[Any] , lowercase : int ) -> List[Any]:
"""simple docstring"""
if index == r:
for j in range(lowercase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
snake_case : Union[str, Any] = arr[i]
combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Optional[int] ) -> List[Any]:
"""simple docstring"""
snake_case : Any = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
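
# Cross-check (a sketch, not part of the original module): the recursion above yields
# the same r-subsets as itertools, in the same lexicographic order, so the first line
# printed by print_combination(arr, len(arr), 3) is "10 20 30".
from itertools import combinations

assert list(combinations([10, 20, 30, 40, 50], 3))[0] == (10, 20, 30)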
| 203
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing: the shortest edge is
        # scaled to size["shortest_edge"] while preserving the aspect ratio.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
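
# Standalone sketch (illustrative, not part of the test class) of the shortest-edge
# resize rule that get_expected_values encodes; the longest_edge cap is deliberately
# out of reach in the tester above.
def expected_hw(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_hw(30, 400) == (18, 240)  # landscape: height snaps to 18, width scales with the aspect ratio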
| 177
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177
| 1
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
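
# Minimal illustrative subclass (hypothetical; real readers live in datasets.io.*).
# It shows the contract the base class fixes: the constructor captures options and
# read() builds the dataset.
class SingleColumnReader(AbstractDatasetReader):
    def read(self):
        with open(self.path_or_paths, encoding="utf-8") as f:
            return Dataset.from_dict({"text": f.read().splitlines()})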
| 348
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 245
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
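
# Illustrative invocation (dataset name, paths, and hyperparameters are placeholders,
# not prescribed by the script itself):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-finetuned \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8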
| 369
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class _lowercase ( OnnxSeqaSeqConfigWithPast ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase_ ( self: Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[str] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ : Dict = {0: """batch"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCamelCase__ : Any = {0: """batch""", 1: """decoder_sequence"""}
lowerCamelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCamelCase__ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase_ ( self: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super().outputs
else:
lowerCamelCase__ : Any = super(UpperCamelCase__ , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase_ ( self: str , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
lowerCamelCase__ : Any = seq_length if not self.use_past else 1
lowerCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[int] = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = common_inputs["""input_ids"""].shape
lowerCamelCase__ : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_attention_heads
lowerCamelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Tuple = decoder_seq_length + 3
lowerCamelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : Any = self.num_layers
lowerCamelCase__ : str = min(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
lowerCamelCase__ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
lowerCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Optional[Any] = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_attention_heads
lowerCamelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] = common_inputs["""attention_mask"""].dtype
lowerCamelCase__ : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : int = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ : List[Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Union[str, Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
lowerCamelCase__ : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : str = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase__ : List[Any] = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return 1e-4
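if __name__ == "__main__":
    # Hedged usage sketch: the dummy-input helpers above start from plain
    # tokenizer output. The checkpoint name comes from the archive map at the
    # top of this file; the rest is illustrative and not part of the original.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""Helsinki-NLP/opus-mt-en-de""" )
    batch = tokenizer(["""hello world"""] * 2 , return_tensors="""pt""" )
    # generate_dummy_inputs additionally derives decoder_input_ids and, when
    # use_past is set, zero-filled past_key_values tensors of shape
    # (batch, num_heads, seq_len, hidden_size // num_heads).
    print(batch["""input_ids"""].shape , batch["""attention_mask"""].shape )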
| 129
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_a = logging.get_logger(__name__)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , question_encoder , generator ):
    """simple docstring"""
    self.question_encoder = question_encoder
    self.generator = generator
    self.current_tokenizer = self.question_encoder
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if os.path.isfile(UpperCAmelCase ):
    raise ValueError(F"""Provided path ({UpperCAmelCase}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase , exist_ok=True )
question_encoder_path = os.path.join(UpperCAmelCase , 'question_encoder_tokenizer' )
generator_path = os.path.join(UpperCAmelCase , 'generator_tokenizer' )
self.question_encoder.save_pretrained(question_encoder_path )
self.generator.save_pretrained(generator_path )
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop('config' , None )
if config is None:
    config = RagConfig.from_pretrained(UpperCAmelCase )
question_encoder = AutoTokenizer.from_pretrained(
    UpperCAmelCase , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
generator = AutoTokenizer.from_pretrained(
    UpperCAmelCase , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=question_encoder , generator=generator )
def __call__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.current_tokenizer(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.generator.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.generator.decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.current_tokenizer = self.question_encoder
def UpperCamelCase ( self ):
"""simple docstring"""
self.current_tokenizer = self.generator
def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
    """simple docstring"""
    warnings.warn(
        '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
        'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
        'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
        'details' , FutureWarning , )
    if max_length is None:
        max_length = self.current_tokenizer.model_max_length
    model_inputs = self(
        src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
    if tgt_texts is None:
        return model_inputs
    # Process tgt_texts
    if max_target_length is None:
        max_target_length = self.current_tokenizer.model_max_length
    labels = self(
        text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
    model_inputs['labels'] = labels['input_ids']
    return model_inputs
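if __name__ == "__main__":
    # Hedged usage sketch (upstream class and checkpoint names assumed): the
    # composed tokenizer loads its two sub-tokenizers from the subfolders
    # written by save_pretrained above.
    from transformers import RagTokenizer

    tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
    inputs = tokenizer('who holds the record in 100m freestyle' , return_tensors='pt' )
    print(inputs['input_ids'].shape )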
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , image_processor , tokenizer ):
    """simple docstring"""
    tokenizer.return_token_type_ids = False
    super().__init__(image_processor , tokenizer )
def __call__( self , images=None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , max_patches = 2048 , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
    """simple docstring"""
    if images is None and text is None:
        raise ValueError('You have to specify either images or text.' )
    # Get only text
    if images is None and not self.image_processor.is_vqa:
        self.current_processor = self.tokenizer
        text_encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        return text_encoding
    if not self.image_processor.is_vqa:
        # add pixel_values
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
    else:
        # add pixel_values and bbox
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )
    if text is not None and not self.image_processor.is_vqa:
        text_encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if "attention_mask" in text_encoding:
            text_encoding['decoder_attention_mask'] = text_encoding.pop('attention_mask' )
        if "input_ids" in text_encoding:
            text_encoding['decoder_input_ids'] = text_encoding.pop('input_ids' )
    else:
        text_encoding = None
    if text_encoding is not None:
        encoding_image_processor.update(text_encoding )
    return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def model_input_names( self ):
    """simple docstring"""
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
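if __name__ == "__main__":
    # Hedged usage sketch (upstream class and checkpoint names assumed): the
    # processor routes images to the image processor and text to the
    # tokenizer, mirroring __call__ above.
    from PIL import Image
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained('google/pix2struct-textcaps-base' )
    encoding = processor(images=Image.new('RGB' , (256, 256) ) , return_tensors='pt' )
    print(sorted(encoding.keys() ) )  # ['attention_mask', 'flattened_patches']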
| 39
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( BaseOutput ):
    '''simple docstring'''
    sample: torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : List[str]=3 , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=("DownEncoderBlock2D",) , lowercase_ : Tuple=(64,) , lowercase_ : Dict=2 , lowercase_ : Optional[int]=32 , lowercase_ : Tuple="silu" , lowercase_ : Tuple=True , ) -> Tuple:
"""simple docstring"""
super().__init__()
_UpperCamelCase = layers_per_block
_UpperCamelCase = torch.nn.Convad(
_a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase = None
_UpperCamelCase = nn.ModuleList([])
# down
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(_a):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(_a) - 1
_UpperCamelCase = get_down_block(
_a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , )
self.down_blocks.append(_a)
# mid
_UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# out
_UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6)
_UpperCamelCase = nn.SiLU()
_UpperCamelCase = 2 * out_channels if double_z else out_channels
_UpperCamelCase = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1)
_UpperCamelCase = False
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = x
_UpperCamelCase = self.conv_in(_a)
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase_ : str):
def custom_forward(*lowercase_ : str):
return module(*_a)
return custom_forward
# down
if is_torch_version(">=" , "1.11.0"):
for down_block in self.down_blocks:
_UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a) , _a , use_reentrant=_a)
# middle
_UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _a , use_reentrant=_a)
else:
for down_block in self.down_blocks:
_UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_a) , _a)
# middle
_UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , _a)
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase = down_block(_a)
# middle
_UpperCamelCase = self.mid_block(_a)
# post-process
_UpperCamelCase = self.conv_norm_out(_a)
_UpperCamelCase = self.conv_act(_a)
_UpperCamelCase = self.conv_out(_a)
return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Dict=3 , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=("UpDecoderBlock2D",) , lowercase_ : Union[str, Any]=(64,) , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=32 , lowercase_ : Optional[int]="silu" , lowercase_ : Optional[Any]="group" , ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCamelCase = layers_per_block
_UpperCamelCase = nn.Convad(
_a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase = None
_UpperCamelCase = nn.ModuleList([])
_UpperCamelCase = in_channels if norm_type == 'spatial' else None
# mid
_UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# up
_UpperCamelCase = list(reversed(_a))
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = i == len(_a) - 1
_UpperCamelCase = get_up_block(
_a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , )
self.up_blocks.append(_a)
_UpperCamelCase = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase = SpatialNorm(block_out_channels[0] , _a)
else:
_UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6)
_UpperCamelCase = nn.SiLU()
_UpperCamelCase = nn.Convad(block_out_channels[0] , _a , 3 , padding=1)
_UpperCamelCase = False
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = z
_UpperCamelCase = self.conv_in(_a)
_UpperCamelCase = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase_ : Optional[Any]):
def custom_forward(*lowercase_ : int):
return module(*_a)
return custom_forward
if is_torch_version(">=" , "1.11.0"):
# middle
_UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _a , _a , use_reentrant=_a)
_UpperCamelCase = sample.to(_a)
# up
for up_block in self.up_blocks:
_UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a) , _a , _a , use_reentrant=_a)
else:
# middle
_UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _a , _a)
_UpperCamelCase = sample.to(_a)
# up
for up_block in self.up_blocks:
_UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_a) , _a , _a)
else:
# middle
_UpperCamelCase = self.mid_block(_a , _a)
_UpperCamelCase = sample.to(_a)
# up
for up_block in self.up_blocks:
_UpperCamelCase = up_block(_a , _a)
# post-process
if latent_embeds is None:
_UpperCamelCase = self.conv_norm_out(_a)
else:
_UpperCamelCase = self.conv_norm_out(_a , _a)
_UpperCamelCase = self.conv_act(_a)
_UpperCamelCase = self.conv_out(_a)
return sample
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : int=None , lowercase_ : List[str]="random" , lowercase_ : List[Any]=False , lowercase_ : str=True) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCamelCase = n_e
_UpperCamelCase = vq_embed_dim
_UpperCamelCase = beta
_UpperCamelCase = legacy
_UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
_UpperCamelCase = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap)))
_UpperCamelCase = self.used.shape[0]
_UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase = self.re_embed
_UpperCamelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.')
else:
_UpperCamelCase = n_e
_UpperCamelCase = sane_index_shape
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = inds.shape
assert len(_a) > 1
_UpperCamelCase = inds.reshape(ishape[0] , -1)
_UpperCamelCase = self.used.to(_a)
_UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase = match.argmax(-1)
_UpperCamelCase = match.sum(2) < 1
if self.unknown_index == "random":
_UpperCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
_UpperCamelCase = self.unknown_index
return new.reshape(_a)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = inds.shape
assert len(_a) > 1
_UpperCamelCase = inds.reshape(ishape[0] , -1)
_UpperCamelCase = self.used.to(_a)
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase = 0 # simply set to zero
_UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a)
return back.reshape(_a)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase = z.permute(0 , 2 , 3 , 1).contiguous()
_UpperCamelCase = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase = torch.argmin(torch.cdist(_a , self.embedding.weight) , dim=1)
_UpperCamelCase = self.embedding(_a).view(z.shape)
_UpperCamelCase = None
_UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
_UpperCamelCase = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
_UpperCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
_UpperCamelCase = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
_UpperCamelCase = self.remap_to_used(_a)
_UpperCamelCase = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
_UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCAmelCase ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple) -> Tuple:
"""simple docstring"""
if self.remap is not None:
_UpperCamelCase = indices.reshape(shape[0] , -1) # add batch axis
_UpperCamelCase = self.unmap_to_all(_a)
_UpperCamelCase = indices.reshape(-1) # flatten again
# get quantized latent vectors
_UpperCamelCase = self.embedding(_a)
if shape is not None:
_UpperCamelCase = z_q.view(_a)
# reshape back to match original input shape
_UpperCamelCase = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class _UpperCAmelCase ( object ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[str]=False) -> str:
"""simple docstring"""
_UpperCamelCase = parameters
_UpperCamelCase = torch.chunk(_a , 2 , dim=1)
_UpperCamelCase = torch.clamp(self.logvar , -30.0 , 20.0)
_UpperCamelCase = deterministic
_UpperCamelCase = torch.exp(0.5 * self.logvar)
_UpperCamelCase = torch.exp(self.logvar)
if self.deterministic:
_UpperCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Optional[torch.Generator] = None) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = randn_tensor(
self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype)
_UpperCamelCase = self.mean + self.std * sample
return x
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str]=None) -> str:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCAmelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=[1, 2, 3]) -> Optional[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0])
_UpperCamelCase = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=_a)
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return self.mean
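if __name__ == "__main__":
    # Self-contained sketch of the reparameterization trick used by the
    # distribution class above: split stacked (mean, logvar) channels, then
    # draw z = mean + std * eps.
    _params = torch.randn(1 , 8 , 4 , 4)  # e.g. a VAE encoder "moments" tensor
    _mean , _logvar = torch.chunk(_params , 2 , dim=1)
    _std = torch.exp(0.5 * torch.clamp(_logvar , -30.0 , 20.0))
    _z = _mean + _std * torch.randn_like(_mean)
    print(_z.shape)  # torch.Size([1, 4, 4, 4])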
| 355
|
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( a__ = "https://www.worldometers.info/coronavirus" ) ->dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(a__ ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(F"{key}\n{value}\n")
| 63
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=10 , UpperCamelCase_=10_24 , UpperCamelCase_=1_28 , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**UpperCamelCase_ )
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[str] = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = patch_size
__lowercase : List[str] = qkv_bias
__lowercase : Union[str, Any] = frequency_stride
__lowercase : List[Any] = time_stride
__lowercase : Tuple = max_length
__lowercase : int = num_mel_bins
| 249
| 1
|
import argparse
import struct
import unittest
class SHAaaa :
'''simple docstring'''
def __init__( self : Any , snake_case_ : Union[str, Any] ):
self.data = snake_case_
# Initialize hash values
self.hashes = [
0X6a09_e667,
0Xbb67_ae85,
0X3c6e_f372,
0Xa54f_f53a,
0X510e_527f,
0X9b05_688c,
0X1f83_d9ab,
0X5be0_cd19,
]
# Initialize round constants
self.round_constants = [
0X428a_2f98,
0X7137_4491,
0Xb5c0_fbcf,
0Xe9b5_dba5,
0X3956_c25b,
0X59f1_11f1,
0X923f_82a4,
0Xab1c_5ed5,
0Xd807_aa98,
0X1283_5b01,
0X2431_85be,
0X550c_7dc3,
0X72be_5d74,
0X80de_b1fe,
0X9bdc_06a7,
0Xc19b_f174,
0Xe49b_69c1,
0Xefbe_4786,
0X0fc1_9dc6,
0X240c_a1cc,
0X2de9_2c6f,
0X4a74_84aa,
0X5cb0_a9dc,
0X76f9_88da,
0X983e_5152,
0Xa831_c66d,
0Xb003_27c8,
0Xbf59_7fc7,
0Xc6e0_0bf3,
0Xd5a7_9147,
0X06ca_6351,
0X1429_2967,
0X27b7_0a85,
0X2e1b_2138,
0X4d2c_6dfc,
0X5338_0d13,
0X650a_7354,
0X766a_0abb,
0X81c2_c92e,
0X9272_2c85,
0Xa2bf_e8a1,
0Xa81a_664b,
0Xc24b_8b70,
0Xc76c_51a3,
0Xd192_e819,
0Xd699_0624,
0Xf40e_3585,
0X106a_a070,
0X19a4_c116,
0X1e37_6c08,
0X2748_774c,
0X34b0_bcb5,
0X391c_0cb3,
0X4ed8_aa4a,
0X5b9c_ca4f,
0X682e_6ff3,
0X748f_82ee,
0X78a5_636f,
0X84c8_7814,
0X8cc7_0208,
0X90be_fffa,
0Xa450_6ceb,
0Xbef9_a3f7,
0Xc671_78f2,
]
self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def preprocessing( snake_case_ : int ):
    padding = b"""\x80""" + (b"""\x00""" * (63 - (len(snake_case_ ) + 8) % 64))
    big_endian_integer = struct.pack(""">Q""" , (len(snake_case_ ) * 8) )
    return snake_case_ + padding + big_endian_integer
def final_hash( self : Union[str, Any] ):
# Convert into blocks of 64 bytes
self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
words = list(struct.unpack(""">16L""" , block ) )
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_0000_0000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0Xffff_ffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0000_0000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def ror( self : Optional[int] , value : int , rotations : int ):
return 0Xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
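# Quick sanity check for the 32-bit rotate-right above (a free-standing
# reimplementation of the same arithmetic, added for illustration):
def _ror32(value: int , rotations: int ) -> int:
    return 0Xffff_ffff & (value << (32 - rotations)) | (value >> rotations)

assert _ror32(0X0000_0001 , 1 ) == 0X8000_0000
assert _ror32(0X8000_0000 , 31 ) == 0X0000_0001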
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ):
import hashlib
UpperCamelCase_: Union[str, Any] = bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(UpperCamelCase_ ).hash , hashlib.sha256(UpperCamelCase_ ).hexdigest() )
def A__ ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase_: Dict = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
UpperCamelCase_: Dict = parser.parse_args()
UpperCamelCase_: Optional[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
UpperCamelCase_: Dict = f.read()
else:
        UpperCamelCase_: List[Any] = bytes(UpperCamelCase_ , """utf-8""" )
    print(SHAaaa(UpperCamelCase_ ).hash )
if __name__ == "__main__":
main()
| 357
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : int = logging.get_logger(__name__)
DEFAULT_FONT_PATH = """ybelkada/fonts"""
def _check_torch_version() -> None:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
    requires_backends(torch_extract_patches , ["""torch"""] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
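def _unfold_shape_demo() -> None:
    # Hedged illustration (not part of the upstream file; assumes torch is
    # installed): the unfold call above yields [N, C * ph * pw, L] with
    # L = (H // ph) * (W // pw).
    img = torch.randn(1 , 3 , 64 , 48 )
    patches = torch.nn.functional.unfold(img , (16, 16) , stride=(16, 16) )
    assert patches.shape == (1, 3 * 16 * 16, (64 // 16) * (48 // 16))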
def render_text( text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ) -> Image.Image:
    requires_backends(render_text , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = """\n""".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , """Arial.TTF""" )
    font = ImageFont.truetype(font , encoding="""UTF-8""" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header( image , header , **kwargs ):
    requires_backends(render_header , """vision""" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class _UpperCamelCase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""flattened_patches"""]
def __init__( self , do_convert_rgb : bool = True , do_normalize : bool = True , patch_size : Dict[str, int] = None , max_patches : int = 2048 , is_vqa : bool = False , **kwargs , ):
    super().__init__(**kwargs )
    self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
    self.do_normalize = do_normalize
    self.do_convert_rgb = do_convert_rgb
    self.max_patches = max_patches
    self.is_vqa = is_vqa
def extract_flattened_patches( self , image : np.ndarray , max_patches : int , patch_size : dict , **kwargs ):
    requires_backends(self.extract_flattened_patches , """torch""" )
    _check_torch_version()
    # convert to torch
    image = to_channel_dimension_format(image , ChannelDimension.FIRST )
    image = torch.from_numpy(image )
    patch_height , patch_width = patch_size["""height"""], patch_size["""width"""]
    image_height , image_width = get_image_size(image )
    # maximize scale s.t.
    scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
    num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
    num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
    resized_height = max(num_feasible_rows * patch_height , 1 )
    resized_width = max(num_feasible_cols * patch_width , 1 )
    image = torch.nn.functional.interpolate(
        image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=False , antialias=True , ).squeeze(0 )
    # [1, rows, columns, patch_height * patch_width * image_channels]
    patches = torch_extract_patches(image , patch_height , patch_width )
    patches_shape = patches.shape
    rows = patches_shape[1]
    columns = patches_shape[2]
    depth = patches_shape[3]
    # [rows * columns, patch_height * patch_width * image_channels]
    patches = patches.reshape([rows * columns, depth] )
    # [rows * columns, 1]
    row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
    col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
    # Offset by 1 so the ids do not contain zeros, which represent padding.
    row_ids += 1
    col_ids += 1
    # Prepare additional patch features.
    # [rows * columns, 1]
    row_ids = row_ids.to(torch.float32 )
    col_ids = col_ids.to(torch.float32 )
    # [rows * columns, 2 + patch_height * patch_width * image_channels]
    result = torch.cat([row_ids, col_ids, patches] , -1 )
    # [max_patches, 2 + patch_height * patch_width * image_channels]
    result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
    result = to_numpy_array(result )
    return result
def normalize( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
    if image.dtype == np.uint8:
        image = image.astype(np.float32 )
    # take mean across the whole `image`
    mean = np.mean(image )
    std = np.std(image )
    adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
    return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
def preprocess( self , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    patch_size = patch_size if patch_size is not None else self.patch_size
    max_patches = max_patches if max_patches is not None else self.max_patches
    is_vqa = self.is_vqa
    if kwargs.get("""data_format""" , None ) is not None:
        raise ValueError("""data_format is not an accepted input as the outputs are """ )
    images = make_list_of_images(images )
    if not valid_images(images ):
        raise ValueError(
            """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
            """torch.Tensor, tf.Tensor or jax.ndarray.""" )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image ) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]
    if is_vqa:
        if header_text is None:
            raise ValueError("""A header text must be provided for VQA models.""" )
        font_bytes = kwargs.pop("""font_bytes""" , None )
        font_path = kwargs.pop("""font_path""" , None )
        if isinstance(header_text , str ):
            header_text = [header_text] * len(images )
        images = [
            render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
            for i, image in enumerate(images )
        ]
    if do_normalize:
        images = [self.normalize(image=image ) for image in images]
    # convert to torch tensor and permute
    images = [
        self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
        for image in images
    ]
    # create attention mask in numpy
    attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
    encoded_outputs = BatchFeature(
        data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=return_tensors )
    return encoded_outputs
| 223
| 0
|
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ) -> list[int]:
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
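# Worked micro-example for the scheduler above (hand-checked): with
# A(arrival 0, burst 3) and B(arrival 1, burst 1), only A is ready at t=0,
# so A runs to completion (waiting[A] = 3 - 0 - 3 = 0); B then runs
# (waiting[B] = 4 - 1 - 1 = 2), and turnaround = burst + waiting.
assert calculate_waitingtime([0, 1], [3, 1], 2) == [0, 2]
assert calculate_turnaroundtime([3, 1], 2, [0, 2]) == [3, 3]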
if __name__ == "__main__":
print('''[TEST CASE 01]''')
no_of_processes = 4
burst_time = [2, 5, 3, 7]
arrival_time = [0, 0, 0, 0]
waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
turn_around_time = calculate_turnaroundtime(
    burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 22
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "patch_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
_A : Dict = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
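

# A minimal sketch of loading the converted weights back (assumes the script
# above was run with --pytorch_dump_folder_path ./dpt-large; that folder name
# is hypothetical):
#
#     from transformers import DPTForDepthEstimation, DPTImageProcessor
#     model = DPTForDepthEstimation.from_pretrained("./dpt-large")
#     processor = DPTImageProcessor.from_pretrained("./dpt-large")
#     depth = model(**processor(prepare_img(), return_tensors="pt")).predicted_depth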
| 202
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return np.maximum(0 , lowerCAmelCase )
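

# A companion sketch (not part of the original file): the ReLU subgradient used
# in backpropagation; the name relu_derivative is our own choice.
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    # 1.0 where the input is positive, 0.0 elsewhere
    return (vector > 0).astype(np.float64)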
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 248
|
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 248
| 1
|
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, found by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
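
# Design note: swap_nodes exchanges node payloads instead of relinking the
# nodes, which sidesteps the edge cases of swapping the head or adjacent
# nodes; the two value lookups keep the whole operation O(n).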
| 133
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
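

# Worked example of the greedy packing above (hypothetical numbers, assuming a
# whitespace tokenizer and max_tokens=5): sources ["a b", "c", "d e f", "g"]
# pack into ["a b c", "d e f g"]; neighbours are merged until the tokenized
# candidate would exceed the budget, then a new bucket is started.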
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 133
| 1
|
"""Print every subsequence of a given sequence via backtracking."""
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: exclude sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
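

# The recursion above branches twice per element (exclude, then include), so a
# sequence of length n prints all 2**n subsequences; for [1, 2] the printed
# order is [], [2], [1], [1, 2].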
if __name__ == "__main__":
__UpperCAmelCase =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 369
|
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            token = f"[unused{i}]"
            self.fairseq_tokens_to_ids[token] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of (sub-word) tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
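

# Sanity check for the offset bookkeeping above: the sentencepiece model maps
# "," to id 3, and 3 + fairseq_offset (12) = 15, matching the alignment table
# in __init__ where "," is the first "real" token of the embedding vocab.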
| 237
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
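

# Typical usage inside a model's test class (a sketch; BertConfig is only an
# illustrative choice, any PretrainedConfig subclass works the same way):
#
#     def setUp(self):
#         self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#     def test_config(self):
#         self.config_tester.run_common_tests()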
| 199
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
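

# Note on the up blocks above: res_hidden_states_tuple is consumed from the end
# (last-in, first-out), mirroring the order in which the down blocks appended
# their outputs, and each skip tensor is concatenated on the channel axis
# (axis=-1, since Flax convolutions use NHWC layout) before entering its resnet.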
| 199
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a language-model tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 356
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
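

# With the default scale_factor=8 the spatial size is effectively rounded up to
# a multiple of 64: get_new_h_w(768, 768) == (768, 768), while
# get_new_h_w(500, 500) == (512, 512).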
class KandinskyPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Kandinsky."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
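

# Note on the guidance step in __call__ above: the UNet predicts noise and a
# learned variance in a single tensor, so it is split first, classifier-free
# guidance is applied to the noise half only, and the text-conditioned variance
# half is re-attached before the scheduler step.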
| 134
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase : Tuple = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 156
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty (self , results ):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain (self ):
        """simple docstring"""
        MODEL_ID = """sgugger/tiny-distilbert-classification"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def test_inference_fp16 (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def test_train_no_configs_fp16 (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tinier_bart"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tinier_bart"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(tmp_dir , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(tmp_dir , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(tmp_dir , """train_time.csv""" ) , env_info_csv_file=os.path.join(tmp_dir , """env.csv""" ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """env.csv""" ) ).exists() )
    def test_trace_memory_full_env (self ):
        """simple docstring"""
        MODEL_ID = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , """sequential""" ) )
            self.assertTrue(hasattr(summary , """cumulative""" ) )
            self.assertTrue(hasattr(summary , """current""" ) )
            self.assertTrue(hasattr(summary , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , """log.txt""" ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , """log.txt""" ) ).exists() )
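# Standalone usage sketch (an assumption, mirroring the arguments exercised by the tests above):
#
#     benchmark_args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(benchmark_args).run()
#     print(results.time_inference_result)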
| 171
| 0
|
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__A = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNet2DModel
    main_input_name = "sample"
@property
    def dummy_input(self ):
        '''simple docstring'''
        batch_size =4
        num_channels =3
        sizes =(32, 32)
        noise =floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step =torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self ):
'''simple docstring'''
return (3, 32, 32)
@property
    def output_shape(self ):
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
'''simple docstring'''
        init_dict ={
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
        inputs_dict =self.dummy_input
return init_dict, inputs_dict
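# Illustrative use of the two dicts prepared above (an assumed sketch following the
# ModelTesterMixin pattern, not code from the original test file):
#
#     init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
#     model = UNet2DModel(**init_dict)
#     sample = model(**inputs_dict).sample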
class UNetLDMModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNet2DModel
    main_input_name = "sample"
@property
    def dummy_input(self ):
        '''simple docstring'''
        batch_size =4
        num_channels =4
        sizes =(32, 32)
        noise =floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step =torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self ):
'''simple docstring'''
return (4, 32, 32)
@property
    def output_shape(self ):
'''simple docstring'''
return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
'''simple docstring'''
        init_dict ={
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
        inputs_dict =self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        '''simple docstring'''
        model , loading_info =UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(torch_device)
        image =model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self ):
        '''simple docstring'''
        model , _ =UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        model.to(torch_device)
        image =model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self ):
        '''simple docstring'''
        model_accelerate , _ =UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise =torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0) , )
        noise =noise.to(torch_device)
        time_step =torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate =model_accelerate(noise , time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load , _ =UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update" , output_loading_info=True , low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load =model_normal_load(noise , time_step)["sample"]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1E-3)
    def test_output_pretrained(self ):
        '''simple docstring'''
        model =UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise =torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
        noise =noise.to(torch_device)
        time_step =torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output =model(noise , time_step).sample
        output_slice =output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice =torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-3))
class NCSNppModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNet2DModel
    main_input_name = "sample"
@property
    def dummy_input(self , sizes=(32, 32)):
        '''simple docstring'''
        batch_size =4
        num_channels =3
        noise =floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # dtype assumed to be int32, matching the usual pattern for this test
        time_step =torch.tensor(batch_size * [10]).to(dtype=torch.int32 , device=torch_device)
        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self ):
'''simple docstring'''
return (3, 32, 32)
@property
    def output_shape(self ):
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
'''simple docstring'''
        init_dict ={
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
        inputs_dict =self.dummy_input
return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub(self ):
        '''simple docstring'''
        model , loading_info =UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(torch_device)
        inputs =self.dummy_input
        noise =floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] =noise
        image =model(**inputs)
        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_mid(self ):
        '''simple docstring'''
        model =UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size =4
        num_channels =3
        sizes =(256, 256)
        noise =torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step =torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output =model(noise , time_step).sample
        output_slice =output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice =torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2))
    def test_output_pretrained_ve_large(self ):
        '''simple docstring'''
        model =UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size =4
        num_channels =3
        sizes =(32, 32)
        noise =torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step =torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output =model(noise , time_step).sample
        output_slice =output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice =torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2))
    def test_forward_with_norm_groups(self ):
        # not required for this model
        pass
| 273
|
def longest_common_subsequence( x: str , y: str ):
    """
    Finds the longest common subsequence between two strings.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None
    m =len(x )
    n =len(y )
    # declaring the array for storing the dp values
    l =[[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match =1 if x[i - 1] == y[j - 1] else 0
            l[i][j] =max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    seq =""
    i , j =m, n
    while i > 0 and j > 0:
        match =1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq =x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln , subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 273
| 1
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests (unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible( self ):
        filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ):
        filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_is_not_compatible( self ):
        filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ):
        filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_is_not_compatible( self ):
        filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_is_not_compatible_variant( self ):
        filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_is_not_compatible_variant( self ):
        filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
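# Taken together, the cases above pin down the contract exercised here: is_safetensors_compatible
# returns True only when every ".bin" weight in the list has a ".safetensors" counterpart, and when
# a variant such as "fp16" is requested, non-variant filenames are still accepted as a fallback.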
| 63
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig (PretrainedConfig ):
    """simple docstring"""
    model_type ='trocr'
    keys_to_ignore_at_inference =['past_key_values']
    attribute_map ={
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
    def __init__( self , vocab_size=50265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
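# Minimal usage sketch (an assumption; the aliasing behavior comes from PretrainedConfig):
#
#     config = TrOCRConfig()
#     assert config.hidden_size == config.d_model  # resolved through attribute_map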
| 63
| 1
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
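# Example (an assumption, not part of the original file): sorting a small array in place.
# The return value is the number of comparisons, which varies between runs because the
# pivot is chosen at random.
#
#     data = np.array([3.0, 1.0, 2.0])
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)  # data is now [1.0, 2.0, 3.0]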
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 66
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel ( pl.LightningModule ):
"""simple docstring"""
    def __init__( self , model ) -> None:
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __A ( self ) -> int:
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ):
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
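    # Example invocation (hypothetical paths; the script name is whatever this module is saved as):
    #
    #   python convert_longformer_qa_checkpoint.py \
    #       --longformer_model longformer-base-4096 \
    #       --longformer_question_answering_ckpt_path ./lightning_qa.ckpt \
    #       --pytorch_dump_folder_path ./longformer-qa-pytorch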
| 66
| 1
|
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any( num: int , base: int ) -> str:
    '''
    Convert a positive integer to its representation in an arbitrary base (2 to 36).

    >>> decimal_to_any(255, 16)
    'FF'
    '''
    if isinstance(num , float ):
        raise TypeError("""int() can't convert non-string with explicit base""" )
    if isinstance(num , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if isinstance(base , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if num < 0:
        raise ValueError("""parameter must be positive int""" )
    if base in (0, 1):
        raise ValueError("""base must be >= 2""" )
    if base > 36:
        raise ValueError("""base must be <= 36""" )
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 120
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
__A : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args: Namespace ):
    '''simple docstring'''
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser: ArgumentParser ) -> None:
        add_new_model_parser = parser.add_parser("""add-new-model""" )
        add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
        add_new_model_parser.add_argument("""--testing_file""" , type=str , help="""Configuration file on which to run.""" )
        add_new_model_parser.add_argument(
            """--path""" , type=str , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , """r""" ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration["""lowercase_modelname"""]
        generate_tensorflow_pytorch_and_flax = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F'{directory}/configuration.json' )
        output_pytorch = """PyTorch""" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
        output_flax = """Flax""" in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path ):
            with open(path , """r""" ) as f:
                lines = f.readlines()
            with open(path , """w""" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , """w""" ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("""\"""" )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("""\"""" )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
| 120
| 1
|
"""simple docstring"""
def kinetic_energy(mass: float , velocity: float ) -> float:
    """
    Kinetic energy of a body: 0.5 * mass * velocity**2, with speed taken as |velocity|.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(2, -2)
    4.0
    """
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 365
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream (AbstractDatasetInputStream ):
    def __init__( self , generator: Callable , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , gen_kwargs: Optional[dict] = None , num_proc: Optional[int] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def read( self ):
        """simple docstring"""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
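# Illustrative entry point (an assumption): `datasets.Dataset.from_generator(gen)` builds this
# input stream under the hood and calls `read()`, which either streams or materializes the
# 'train' split depending on the `streaming` flag.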
| 269
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type ="""git_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size =hidden_size
        self.intermediate_size =intermediate_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.num_channels =num_channels
        self.patch_size =patch_size
        self.image_size =image_size
        self.initializer_range =initializer_range
        self.attention_dropout =attention_dropout
        self.layer_norm_eps =layer_norm_eps
        self.hidden_act =hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs =cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type' ) == "git":
            config_dict =config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type ="""git"""
    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config ={}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
        self.vision_config =GitVisionConfig(**vision_config )
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.hidden_act =hidden_act
        self.intermediate_size =intermediate_size
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.initializer_range =initializer_range
        self.layer_norm_eps =layer_norm_eps
        self.position_embedding_type =position_embedding_type
        self.use_cache =use_cache
        self.tie_word_embeddings =tie_word_embeddings
        self.num_image_with_embedding =num_image_with_embedding
        self.bos_token_id =bos_token_id
        self.eos_token_id =eos_token_id
    def to_dict( self ):
        """simple docstring"""
        output =copy.deepcopy(self.__dict__ )
        output["vision_config"] =self.vision_config.to_dict()
        output["model_type"] =self.__class__.model_type
        return output
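# Minimal usage sketch (an assumption, using only the defaults defined above):
#
#     config = GitConfig()           # builds a default GitVisionConfig internally
#     d = config.to_dict()           # the nested vision config is serialized too
#     assert d["model_type"] == "git"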
| 71
|
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
    """
    Speed of sound in a fluid: c = sqrt(bulk_modulus / density).

    >>> speed_of_sound_in_a_fluid(density=1.0, bulk_modulus=4.0)
    2.0
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327
| 0
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_VOCAB = get_tests_dir("""fixtures/vocab.json""")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("""fixtures""")
class AutoProcessorTest ( unittest.TestCase ):
    '''simple docstring'''
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut( self ):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
        self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_repo( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            processor.save_pretrained(tmpdirname )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_extractor_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json" ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_tokenizer_processor_class( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , "r" ) as f:
                config_dict = json.load(f )
            config_dict.pop("processor_class" )
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , "w" ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_feat_extr_processor_class( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "r" ) as f:
                config_dict = json.load(f )
            config_dict.pop("processor_class" )
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "w" ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_model_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor" )
            model_config.save_pretrained(tmpdirname )
            # copy relevant files
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json" ) )
            # create empty sample processor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , "w" ) as f:
                f.write("{}" )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_from_pretrained_dynamic_processor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=False )
        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=True )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
        feature_extractor = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        tokenizer = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=True , use_fast=False )
            new_tokenizer = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
    def test_new_processor_registration( self ):
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            AutoProcessor.register(CustomConfig , CustomProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoProcessor.register(Wav2Vec2Config , Wav2Vec2Processor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir , "vocab.txt" )
                with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
                tokenizer = CustomTokenizer(vocab_file )
                processor = CustomProcessor(feature_extractor , tokenizer )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir )
                new_processor = AutoProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_processor , CustomProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict( self ):
        class NewFeatureExtractor ( Wav2Vec2FeatureExtractor ):
            special_attribute_present = False
        class NewTokenizer ( BertTokenizer ):
            special_attribute_present = False
        class NewProcessor ( ProcessorMixin ):
            feature_extractor_class = '''AutoFeatureExtractor'''
            tokenizer_class = '''AutoTokenizer'''
            special_attribute_present = False
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoProcessor.register(CustomConfig , NewProcessor )
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=False )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=True )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer( self ):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
    def test_auto_processor_creates_image_processor( self ):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase ):
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        processor = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor" ) , push_to_hub=True , use_auth_token=self._token )
            new_processor = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_in_organization( self ):
        processor = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor-org" ) , push_to_hub=True , use_auth_token=self._token , organization="valid_org" , )
            new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_dynamic_processor( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
            processor = CustomProcessor(feature_extractor , tokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
            repo = Repository(tmp_dir , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir , "tokenizer_config.json" ) ) as f:
                tokenizer_config = json.load(f )
            self.assertDictEqual(
                tokenizer_config["auto_map"] , {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_feature_extraction.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_tokenization.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_processing.py" ) ) )
            repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=True )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
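# The registration pattern exercised by the tests above, in isolation. A hedged
# sketch: `CustomConfig`/`CustomProcessor` stand in for your own classes (here
# they are the test fixtures), and the path is a processor saved with
# `save_pretrained`. Kept as a comment so importing this module stays
# side-effect free.
#
#     from transformers import AutoConfig, AutoProcessor
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoProcessor.register(CustomConfig, CustomProcessor)
#     processor = AutoProcessor.from_pretrained("path/to/saved/custom/processor")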
| 319
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
    """tokenizer_file""": {
        """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/pegasus-xsum""": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast ):
    '''Fast PEGASUS tokenizer, backed by HuggingFace's *tokenizers* library.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
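# Usage sketch for the class above. Hedged: downloading "google/pegasus-xsum"
# needs network access, so this stays a comment.
#
#     tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["PEGASUS masks whole sentences during pre-training."], return_tensors="pt")
#     # every encoded sequence ends with eos_token_id, per build_inputs_with_special_tokens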
| 319
| 1
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two discrete signals."""
    def __init__( self ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ):
        '''Return the circular convolution of ``first_signal`` and ``second_signal``.'''
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
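# Worked example for the class above: with first_signal = [2, 1, 2, -1] and
# second_signal = [1, 2, 3, 4], the circular convolution is [10, 10, 6, 14].
if __name__ == "__main__":
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]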
| 97
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module ) -> None:
    '''Disable gradient updates for every parameter of ``module``.'''
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    '''Pick the best available torch device.'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image(image ) -> None:
    '''Display ``image`` and hide the matplotlib axes.'''
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp() -> str:
    '''Return the current time formatted as HH:MM:SS.'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
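# Usage sketch for the helpers above. Note the function names are descriptive
# stand-ins introduced while repairing this snippet, and `model` is a
# placeholder for any torch.nn.Module:
#
#     device = get_device()
#     freeze_params(model)  # e.g. freeze a pre-trained backbone before fine-tuning
#     print(get_timestamp(), "running on", device)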
| 97
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x ):
    """Calculate the entropy of the pre-softmax logits ``x``."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class DeeBertEncoder(nn.Module ):
    '''BERT encoder with one early-exit "highway" classifier per layer (DeeBERT).'''
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        '''Set the per-layer entropy thresholds (a scalar is broadcast to every layer).'''
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        '''Copy the main model's pooler weights into every highway pooler.'''
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel(BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )
    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings
    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value
    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception ):
    '''Raised by DeeBertEncoder when a highway decides to exit early.'''
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module ):
    '''A highway: pooler + classifier attached to the output of one BertLayer.'''
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification(BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
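# A minimal inference sketch for the early-exit model above. Hedged: it assumes
# a fine-tuned DeeBERT checkpoint directory and tokenized `inputs`; kept as a
# comment so importing this module has no side effects.
#
#     model = DeeBertForSequenceClassification.from_pretrained(checkpoint_dir)
#     model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a layer is confident enough
#     model.eval()
#     with torch.no_grad():
#         outputs = model(**inputs)  # layers past the chosen exit are never executed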
| 358
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer ):
    '''Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models.'''
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''' , '''\u2582\u2583''' )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        text = super()._decode(*args , **kwargs )
        text = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
        return text
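# Usage sketch for the tokenizer above. Hedged: it needs network access for the
# checkpoint and the `jieba` package installed, so it stays a comment.
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("清华大学")
#     print(tokenizer.decode(ids))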
| 26
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    '''Pipeline that classifies a pair of sentences (text, second_text).'''
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['''second_text'''] = kwargs['''second_text''']
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 306
|
def merge_sort(collection ) -> list:
    """Pure Python implementation of the merge sort algorithm."""
    def merge(left , right ) -> list:
        """Merge two sorted lists into a single sorted list."""
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
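# Quick self-check of the implementation above (runs only when executed as a
# script, after the interactive demo): merge sort is O(n log n) and stable.
if __name__ == "__main__":
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []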
| 306
| 1
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str ):
    """Log commit info of the current repository to ``folder_path``."""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    """Handle single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['''WORLD_SIZE'''] )
        params.n_gpu_per_node = int(os.environ['''N_GPU_NODE'''] )
        params.global_rank = int(os.environ['''RANK'''] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['''N_NODES'''] )
        assert params.node_id == int(os.environ['''NODE_RANK'''] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
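# Usage sketch. Hedged: the attribute names mirror what the functions above
# read; in the distillation scripts they come from argparse.
#
#     from types import SimpleNamespace
#
#     args = SimpleNamespace(seed=56, n_gpu=0, local_rank=-1)
#     set_seed(args)
#     init_gpu_params(args)  # n_gpu=0 takes the CPU-only early-return branch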
| 366
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results ):
    expressions = test_results.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''' , line ):
            in_error = True
            file = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class lowercase__ :
'''simple docstring'''
    def __init__( self, title, doc_test_results ):
        self.title = title
        self._time_spent = doc_test_results['''time_spent'''].split(''',''' )[0]
        self.n_success = doc_test_results['''success''']
        self.n_failures = doc_test_results['''failures''']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours )}h{int(minutes )}m{int(seconds )}s"
@property
    def header( self ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures( self ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def category_failures( self ) -> Dict:
        line_length = 40
        category_failures = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(v, dict )}
        report = ''''''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
    @property
    def payload( self ) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out() -> None:
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': payload} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=payload, )
    def post( self ):
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=text, )
    def get_reply_blocks( self, job_name, job_link, failures, text ):
        failures_text = ''''''
        for key, value in failures.items():
            value = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            content['''accessory'''] = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ):
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        job_link = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                text = f"*Num failures* :{len(job_result['failed'] )} \n"
                failures = job_result['''failures''']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['''ts'''], )
                time.sleep(1 )
def get_job_links():
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''' , e )
    return {}
def retrieve_artifact(name: str ):
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='''utf-8''' ) as f:
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name , file )}." ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__( self, name ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self, path ):
            self.paths.append({'''name''': self.name, '''path''': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
                        break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 247
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
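# Migration sketch: the shim above only forwards its arguments to `Trainer`,
# so new code can construct the parent class directly (hedged: `model` and
# `training_args` are placeholders for your own objects):
#
#     from transformers import Trainer
#     trainer = Trainer(model=model, args=training_args)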
| 219
|
def binary_xor(a: int , b: int ) -> str:
    """Return the XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
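# Worked example: bin(25) = 11001 and bin(32) = 100000; padded to six bits
# these XOR to 111001, i.e. 57.
if __name__ == "__main__":
    assert binary_xor(25, 32) == "0b111001"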
| 219
| 1
|
def text_justification(word: str , max_width: int) -> list:
    """Greedily fill lines with words, then pad with spaces to ``max_width``."""
    words = word.split()
    def justify(line: list , width: int , max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """)
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(""" """.join(line) + (remaining_spaces + 1) * """ """)
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
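# Worked example for the function above: justify to a width of 16 characters.
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]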
| 266
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    @require_torch
    def test_prepare_batch( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
        batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [1_0_2]
        assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 266
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    feature_extractor_class = '''Wav2Vec2FeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self,feature_extractor,tokenizer ):
        super().__init__(feature_extractor,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls,pretrained_model_name_or_path,**kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path,**kwargs )
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''',FutureWarning,)
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path,**kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path,**kwargs )
            return cls(feature_extractor=feature_extractor,tokenizer=tokenizer )
    def __call__( self,*args,**kwargs ):
        if self._in_target_context_manager:
            return self.current_processor(*args,**kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''',None )
        sampling_rate = kwargs.pop('''sampling_rate''',None )
        text = kwargs.pop('''text''',None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio,*args,sampling_rate=sampling_rate,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def pad( self,*args,**kwargs ):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args,**kwargs )
        input_features = kwargs.pop('''input_features''',None )
        labels = kwargs.pop('''labels''',None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features,*args,**kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels,**kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def batch_decode( self,*args,**kwargs ):
        return self.tokenizer.batch_decode(*args,**kwargs )
    def decode( self,*args,**kwargs ):
        return self.tokenizer.decode(*args,**kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
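# Usage sketch (not from the original file; the checkpoint id and the
# `raw_audio`/`predicted_ids` variables are illustrative assumptions):
#   processor = SCREAMING_SNAKE_CASE__.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
#   transcription = processor.batch_decode(predicted_ids)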
| 193
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ):
        """simple docstring"""
        self.connections = {}
    def add_node( self , node ):
        """simple docstring"""
        self.connections[node] = {}
    def add_transition_probability( self , nodea , nodeb , probability ):
        """simple docstring"""
        if nodea not in self.connections:
            self.add_node(nodea )
        if nodeb not in self.connections:
            self.add_node(nodeb )
        self.connections[nodea][nodeb] = probability
    def get_nodes( self ):
        """simple docstring"""
        return list(self.connections )
    def transition( self , node ):
        """simple docstring"""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions( start : str , transitions : list[tuple[str, str, float]] , steps : int ):
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea , nodeb , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
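# Usage sketch (the transition table values are illustrative, not from the
# original file): simulate a two-node chain for 1000 steps.
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 1000))  # Counter of visits per node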
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93
| 0
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ : List[Any] = False
class a__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 364
|
import math
import sys
def SCREAMING_SNAKE_CASE ( number) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1 , root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer)
        answers[i] = answer
    return answers[number]
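# Usage sketch (illustrative value): the function returns the minimum number of
# perfect squares summing to `number`, e.g. 12 = 4 + 4 + 4.
#   print(SCREAMING_SNAKE_CASE(12))  # 3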
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180
| 0
|
'''simple docstring'''
from __future__ import annotations
def __snake_case ( n : int ):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
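# Usage sketch (illustrative): prime factorization with multiplicity.
#   print(__snake_case(100))  # [2, 2, 5, 5]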
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
|
'''simple docstring'''
from __future__ import annotations
def __snake_case ( n : int ):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 1
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr ):
    '''simple docstring'''
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element : float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast( arr ):
    '''simple docstring'''
    result = []
    for i, outer in enumerate(arr ):
        next_element : float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element( arr ):
    '''simple docstring'''
    arr_size = len(arr )
    stack : list[float] = []
    result : list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 300
|
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule( temperature , molar_mass ):
    '''simple docstring'''
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
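# Background for the formula above: v_rms = sqrt(3 * R * T / M), with R the
# universal gas constant in J/(mol*K), T in kelvin and M in kg/mol.
# Usage sketch (illustrative SI values): rms_speed_of_molecule(300, 0.028) for N2.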
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 1
|
"""simple docstring"""
def circle_sort(collection ) -> list:
    """simple docstring"""
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection , low , high ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
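# Usage sketch (illustrative input):
#   print(circle_sort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]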
if __name__ == "__main__":
UpperCAmelCase_ : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase_ : Tuple = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 91
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there" , return_tensors="np").input_ids
        labels = tokenizer("Hi I am" , return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id)
        logits = model(input_ids , decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 269
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( separator : str , separated : list[str] ) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
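# Usage sketch (illustrative; mirrors str.join semantics):
#   SCREAMING_SNAKE_CASE__("-", ["a", "b", "c"])  # returns 'a-b-c'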
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( PretrainedConfig):
    model_type = 'yolos'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-1_2, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCAmelCase ( OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
| 31
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) ->None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) ->None:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
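# Illustrative valid and invalid `rope_scaling` values for the validation above
# (class name kept from this file; values are assumptions for demonstration):
#   lowercase_(rope_scaling={"type": "linear", "factor": 2.0})   # passes
#   lowercase_(rope_scaling={"type": "cubic", "factor": 2.0})    # raises ValueError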
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type ,generator_name_or_path ,question_encoder_name_or_path ,dest_dir ,config_name_or_path = None ,generator_tokenizer_name_or_path = None ,question_encoder_tokenizer_name_or_path = None ,) -> None:
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path ,generator_name_or_path ,config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 208
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class a (datasets.BuilderConfig ):
"""simple docstring"""
    batch_size : int = 1_0000
    columns : Optional[List[str]] = None
    features : Optional[datasets.Features] = None
class a (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table : pa.Table ) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'{file_idx}_{batch_idx}', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                    raise
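# Usage sketch (not in the original file; the file path is an illustrative assumption):
#   import datasets
#   ds = datasets.load_dataset("parquet", data_files={"train": "data/train.parquet"})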
| 134
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition( number_to_partition ):
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret : set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution( number_unique_partitions = 5_0_0_0 ):
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
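# Note: this is the Project Euler 77 pattern — `solution()` returns the first
# value writable as a sum of primes in over five thousand ways (reported
# answer: 71). Each partition is encoded as a product of its primes so the
# set deduplicates orderings.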
if __name__ == "__main__":
print(f'''{solution() = }''')
| 134
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
lowercase__ = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
    merges_file = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 96
|
def euclidean_gcd ( a,b ):
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive ( a,b ):
    return a if b == 0 else euclidean_gcd_recursive(b,a % b )
def main ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
| 26
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def test_flatten_dict( self):
        '''simple docstring'''
        input_dict = {
            """task_specific_params""": {
                """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
                """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
                """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
            }
        }
        expected_dict = {
            """task_specific_params.summarization.length_penalty""": 1.0,
            """task_specific_params.summarization.max_length""": 128,
            """task_specific_params.summarization.min_length""": 12,
            """task_specific_params.summarization.num_beams""": 4,
            """task_specific_params.summarization_cnn.length_penalty""": 2.0,
            """task_specific_params.summarization_cnn.max_length""": 142,
            """task_specific_params.summarization_cnn.min_length""": 56,
            """task_specific_params.summarization_cnn.num_beams""": 4,
            """task_specific_params.summarization_xsum.length_penalty""": 1.0,
            """task_specific_params.summarization_xsum.max_length""": 62,
            """task_specific_params.summarization_xsum.min_length""": 11,
            """task_specific_params.summarization_xsum.num_beams""": 6,
        }
        self.assertEqual(flatten_dict(input_dict) , expected_dict)
    def test_transpose_numpy( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(transpose(x) , x.transpose()))
        x = np.random.randn(3 , 4 , 5)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
    @require_torch
    def test_transpose_torch( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x) , transpose(t).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , transpose(t , axes=(1, 2, 0)).numpy()))
    @require_tf
    def test_transpose_tf( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x) , transpose(t).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , transpose(t , axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x) , np.asarray(transpose(t))))
        x = np.random.randn(3 , 4 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , np.asarray(transpose(t , axes=(1, 2, 0)))))
    def test_reshape_numpy( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , np.reshape(x , (4, 3))))
        x = np.random.randn(3 , 4 , 5)
        self.assertTrue(np.allclose(reshape(x , (12, 5)) , np.reshape(x , (12, 5))))
    @require_torch
    def test_reshape_torch( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , reshape(t , (4, 3)).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x , (12, 5)) , reshape(t , (12, 5)).numpy()))
    @require_tf
    def test_reshape_tf( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , reshape(t , (4, 3)).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x , (12, 5)) , reshape(t , (12, 5)).numpy()))
    @require_flax
    def test_reshape_flax( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , np.asarray(reshape(t , (4, 3)))))
        x = np.random.randn(3 , 4 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x , (12, 5)) , np.asarray(reshape(t , (12, 5)))))
    def test_squeeze_numpy( self):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4)
        self.assertTrue(np.allclose(squeeze(x) , np.squeeze(x)))
        x = np.random.randn(1 , 4 , 1 , 5)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , np.squeeze(x , axis=2)))
    @require_torch
    def test_squeeze_torch( self):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x) , squeeze(t).numpy()))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , squeeze(t , axis=2).numpy()))
    @require_tf
    def test_squeeze_tf( self):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x) , squeeze(t).numpy()))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , squeeze(t , axis=2).numpy()))
    @require_flax
    def test_squeeze_flax( self):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x) , np.asarray(squeeze(t))))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , np.asarray(squeeze(t , axis=2))))
    def test_expand_dims_numpy( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , np.expand_dims(x , axis=1)))
    @require_torch
    def test_expand_dims_torch( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , expand_dims(t , axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , expand_dims(t , axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax( self):
        '''simple docstring'''
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , np.asarray(expand_dims(t , axis=1))))
| 360
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
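# Example invocation (script name and paths are illustrative assumptions):
#   python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path ./gpt2/model.ckpt --pytorch_dump_folder_path ./gpt2-pt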
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
lowercase : int = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 225
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( PipelineTool ):
    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVisionaSeq
    inputs = ["""image"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image : "Image.Image" ):
        """simple docstring"""
        return self.pre_processor(images=image , return_tensors="""pt""" )
    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
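# Usage sketch (the tool is invoked through PipelineTool's __call__; `image` is
# an illustrative PIL.Image instance, not defined in this file):
#   tool = a_()
#   caption = tool(image)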
| 321
|
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('s3fs') is not None
if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401
SCREAMING_SNAKE_CASE__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path )-> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
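# Usage sketch (illustrative URI):
#   extract_path_from_uri("s3://my-bucket/datasets/squad")  # 'my-bucket/datasets/squad'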
def is_remote_filesystem( fs )-> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs , src , dst )-> None:
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( )-> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 321
| 1
|
from __future__ import annotations
import bisect
def bisect_left( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection : list[int] , item : int ) -> int | None:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection : list[int] , item : int ) -> int | None:
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection : list[int] , item : int , left : int , right : int ) -> int | None:
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
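# Usage sketch (illustrative values):
#   bisect_left([0, 5, 7, 10, 15], 6)      # 2
#   binary_search([0, 5, 7, 10, 15], 10)   # 3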
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by comma:\n").strip()
lowercase_ = sorted(int(item) for item in user_input.split(","))
lowercase_ = int(input("Enter a single number to be found in the list:\n"))
lowercase_ = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 369
|
import numpy as np
from transformers import Pipeline
def softmax( outputs ) -> np.ndarray:
    '''simple docstring'''
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
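# Sanity check (illustrative): after the max-shift and normalization above,
# each row sums to 1.
#   softmax(np.array([[1.0, 2.0, 3.0]])).sum(axis=-1)  # array([1.])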
class A ( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs )-> Tuple:
        '''simple docstring'''
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        '''simple docstring'''
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self , model_outputs )-> Dict:
        '''simple docstring'''
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 282
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def _SCREAMING_SNAKE_CASE ( postfix_notation : list ) ->int:
    '''simple docstring'''
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack : list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
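# Usage sketch (illustrative): evaluate (2 + 1) * 3 in postfix form.
#   _SCREAMING_SNAKE_CASE(["2", "1", "+", "3", "*"])  # 9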
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
    mode = "sequence-classification"
    def __init__( self , hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode)
    def forward( self , **inputs):
        """simple docstring"""
        return self.model(**inputs)
    def training_step( self , batch , batch_idx):
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["""scheduler"""]
        tensorboard_logs = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self):
        """simple docstring"""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file)
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("""Saving features into cached file %s""" , cached_features_file)
                torch.save(features , cached_features_file)
    def get_dataloader( self , mode : str , batch_size : int , shuffle : bool = False):
        """simple docstring"""
        mode = """dev""" if mode == """test""" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("""Loading features from cached file %s""" , cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self , batch , batch_idx):
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs):
        """simple docstring"""
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids)}
        ret = dict(results.items())
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs : list):
        """simple docstring"""
        ret , preds , targets = self._eval_end(outputs)
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs):
        """simple docstring"""
        ret , predictions , targets = self._eval_end(outputs)
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir)
        parser.add_argument(
            """--max_seq_length""" , default=1_2_8 , type=int , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--task""" , default="""""" , type=str , required=True , help="""The GLUE task to run""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
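# Example invocation (a sketch; `--output_dir` and `--do_predict` come from the generic
# argument parser defined outside this excerpt):
#   python run_pl_glue.py --task mrpc --max_seq_length 128 --gpus 1 --output_dir ./results/mrpc --do_predict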
if __name__ == "__main__":
main()
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*x, **y):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """Factory used by the CLI: builds a pipeline from the parsed arguments and wraps it in a ServeCommand."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose the model's configuration."""
    infos: dict
class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""
    text: str
class ServeForwardResult(BaseModel):
    """Forward result model."""
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)
    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and eventually return corresponding token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        """Run the model on the given input through the underlying pipeline."""
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
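# Example session (a sketch; host/port are the defaults registered above):
#   transformers-cli serve --task sentiment-analysis
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'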
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros until the length is a multiple of 3.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
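# Example (worked through the loop above): bin_to_octal("1111") pads to "001111",
# splits into ["001", "111"], and yields "17".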
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by `level` (negative darkens, positive lightens)."""
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
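# Note: brightness(c) simplifies to c + level; for 8-bit images, PIL's point()
# evaluates the function over the 0-255 range and clamps results into that range.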
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
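# Minimal usage sketch (the checkpoint name is only an example):
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cats.png", candidate_labels=["cat", "remote"])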
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel to a TensorFlow v1 checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
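# Example invocation (script name and paths are illustrative):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_checkpoint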
import datasets
from .evaluate import evaluate
_snake_case = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_snake_case = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_snake_case = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string"),
"prediction_text": datasets.features.Sequence(datasets.Value("string")),
},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}), codebase_urls=["https://www.atticusprojectai.org/cuad"], reference_urls=["https://www.atticusprojectai.org/cuad"], )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
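# Plain-English language names mapped to the FLORES-200 codes that the NLLB checkpoints expect.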
__magic_name__: Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
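# Minimal usage sketch (assumes the agents tool framework wires encode/forward/decode
# together via __call__):
#   translator = TranslationTool()
#   translator("Bonjour", src_lang="French", tgt_lang="English")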
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """Configuration class for GPT-Neo models."""
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [["global", "local"], 12]-style specs into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
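# For example, a (2, 8, 16) tensor unfolded with dimension=1, size=4, step=4
# comes out with shape (2, 2, 16, 4), matching torch.Tensor.unfold.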
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-splitting helper to enable the export to ONNX."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
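# For example, seq_length=8 with window_size=4 checks divisors [1, 2, 3],
# keeps [1, 2], and returns (2, 4): block length 2, split into 4 blocks.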
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
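# With the defaults above, column_mapping is the identity mapping
# {"question": "question", "context": "context", "answers": "answers"};
# it is used when preparing a dataset for the task to rename custom column names.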
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__SCREAMING_SNAKE_CASE : Optional[Any] = get_logger()
__SCREAMING_SNAKE_CASE : Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp
        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")
    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")
    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")
    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a maze of 0s (open) and 1s (blocked) from the top-left to the bottom-right corner."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking helper: mark (i, j) as part of the path and try all four directions."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
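# Example: solve_maze([[0, 1], [0, 0]]) prints the solution path
#   [1, 0]
#   [1, 1]
# and returns True.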
if __name__ == "__main__":
import doctest
doctest.testmod()
INSTALL_CONTENT = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed from the Maxwell-Boltzmann distribution: vrms = sqrt(3RT/M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
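# Note: the formula expects molar mass in kg/mol, so molar_mass=28 yields ~16.3 m/s;
# using 0.028 kg/mol for N2 gives the physically expected ~517 m/s.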
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency dict, adding each possible edge with `probability`."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
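# Example: complete_graph(3) -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}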
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''sentencepiece.model'''}
__A = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
__A = {
'''google/rembert''': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-5
| 64
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model: it needs MLM labels plus a sentence-order label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 295
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 292
| 0
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 358
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # subtract the per-row max before exponentiating, for numerical stability
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 46
| 0
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """
    Make a matrix of increasing integers, defaulting to 4x4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix 90 degrees counterclockwise.
    """
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix 180 degrees.
    """
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix 270 degrees counterclockwise.
    """
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
| 255
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
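# The fast tests below build the DiffEdit pipeline from tiny randomly-initialized
# components (UNet, VAE, CLIP text encoder), so mask generation, inversion and
# inpainting can all be exercised deterministically on CPU.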
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 255
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 76
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 76
| 1
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    # squared Euclidean norm; used by the RBF kernel below
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 119
| 1
|
'''simple docstring'''
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    """Check characters pairwise from both ends towards the middle."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    """Return True if s is a palindrome, otherwise return False."""
    mid = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(mid))


def is_palindrome_recursive(s: str) -> bool:
    """Strip matching outermost characters and recurse on the rest."""
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Return True if s equals its own reverse."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(F"""{key:21} {value}""")
    print('''a man a plan a canal panama''')

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function('''is_palindrome_slice''')
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function('''is_palindrome''')
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function('''is_palindrome_recursive''')
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function('''is_palindrome_traversal''')
| 365
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
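# These fast tests assemble a miniature Shap-E text-to-3D stack (tiny prior,
# CLIP text encoder and renderer) so the full generation loop can run on CPU.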
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def time_input_dim(self):
        return 32
@property
    def time_embed_dim(self):
        return self.time_input_dim * 4
@property
    def renderer_dim(self):
        return 8
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array([0.00039216] * 9)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 0
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
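# The dict below only names the public submodules; the actual imports are deferred
# until first attribute access by the _LazyModule installed at the bottom of the file.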
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 105
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest ( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
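    # Helper below: builds one random channels-first 3x30x400 uint8 array and
    # converts it to a channels-last PIL image for the processor tests.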
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : int = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Optional[Any] = self.get_image_processor()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : Optional[int] = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : int = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : int = processor(text=_lowerCamelCase )
A_ : Any = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[Any] = self.get_tokenizer()
A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : Any = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Tuple = self.prepare_image_inputs()
A_ : Optional[int] = processor(images=_lowerCamelCase , visual_prompt=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : List[str] = processor.batch_decode(_lowerCamelCase )
A_ : str = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
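# For reference, typical end-to-end use of the processor under test looks like the
# sketch below (the checkpoint name is illustrative, not part of this test):
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")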
| 167
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
    label_schema : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
    question_column : str = "question"
    context_column : str = "context"
    answers_column : str = "answers"
@property
    def column_mapping( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
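# Example: with a SQuAD-style dataset, this template lets
# `dataset.prepare_for_task("question-answering-extractive")` rename columns that
# match `column_mapping` to the canonical "question"/"context"/"answers" schema.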
| 339
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : List[str] ) -> Dict:
__lowerCamelCase = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
__lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __A ( self : List[Any] ) -> List[str]:
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
__lowerCamelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
__lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : List[Any] ) -> Optional[int]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : List[Any] ) -> Any:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def __A ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
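    # Note: `visual_prompt` images go through the same image processor as `images`
    # but come back under the separate `conditional_pixel_values` key asserted above.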
def __A ( self : List[Any] ) -> Any:
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 339
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('Building PyTorch model from configuration: {}'.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
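# Example invocation (all paths below are hypothetical placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin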
| 96
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
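    # The check above decodes in two cached steps (all-but-last token, then the
    # last token against the cache) and compares the final-position logits with a
    # single uncached forward pass; agreement within 1e-3 means the KV cache and
    # position ids are wired correctly.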
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxGPTJModelTest ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
        inputs = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(output_string , expected_string )
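    # Note: `model.generate` is wrapped in `jax.jit` above, so the first call
    # compiles for the padded batch shape; left padding keeps one static shape
    # across prompts of different lengths.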
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : Any =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : Any =getattr(snake_case__ , snake_case__ )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Tuple =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : int =0
UpperCAmelCase : Optional[int] =1
UpperCAmelCase : Optional[int] =0
UpperCAmelCase : Union[str, Any] =1
UpperCAmelCase : List[str] =pt_model_class(snake_case__ ).eval()
    UpperCAmelCase : Optional[int] =model_class(snake_case__ , dtype=jnp.float32 )
UpperCAmelCase : Any =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
UpperCAmelCase : Union[str, Any] =fx_state
with torch.no_grad():
UpperCAmelCase : Any =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : Dict =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
UpperCAmelCase : str =model_class.from_pretrained(snake_case__ , from_pt=snake_case__ )
UpperCAmelCase : int =fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : int =getattr(snake_case__ , snake_case__ )
UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval()
    UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.float32 )
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : str =0
UpperCAmelCase : Any =1
UpperCAmelCase : List[Any] =0
UpperCAmelCase : Tuple =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ )
with torch.no_grad():
UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : Tuple =model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 348
| 0
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelBaseModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
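        # The base (encoder-only) model pools the sequence, so the three shape
        # assertions above pin the pooled length: 2 by default, 3 once sequence
        # truncation is disabled, and 2 again without a separate [CLS] token.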
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForPreTraining(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        '''simple docstring'''
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FunnelConfig)
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest ( TFModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFunnelModelTester(self , base=True)
        self.config_tester = ConfigTester(self , config_class=FunnelConfig)
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 231
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence: list[Any] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
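# Worked example: for sequence [3, 1, 2, 4] the exclude-first recursion prints all
# 2**4 = 16 subsequences, starting with [] (every element skipped) and ending with
# [3, 1, 2, 4] (every element kept).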
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 231
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema : ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
    question_column : str = "question"
    context_column : str = "context"
    answers_column : str = "answers"
@property
    def column_mapping( self ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 135
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class PerceiverTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                continue
            toks.append((i, tok) )
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = self.perceiver_tokenizer
__lowerCamelCase : Optional[int] = "Unicode €."
__lowerCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase )
__lowerCamelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
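        # Why these ids: Perceiver tokenizes raw UTF-8 bytes shifted by the 6
        # special tokens, so "€" (bytes 0xE2 0x82 0xAC = 226/130/172) becomes
        # 232/136/178, bracketed by [CLS]=4 and [SEP]=5.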
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : List[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]Unicode €.[SEP]" )
__lowerCamelCase : Optional[Any] = tokenizer("e è é ê ë" )
__lowerCamelCase : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Dict = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__lowerCamelCase : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__lowerCamelCase : Dict = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
__lowerCamelCase : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__lowerCamelCase : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
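        # 38 = [CLS] + the 36 UTF-8 bytes of the longer sentence + [SEP]; the
        # shorter sentence is padded with one trailing [PAD]=0, visible at the
        # end of the expected id list above.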
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowerCamelCase : str = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , UpperCAmelCase )
self.assertIn("attention_mask" , UpperCAmelCase )
self.assertNotIn("decoder_input_ids" , UpperCAmelCase )
self.assertNotIn("decoder_attention_mask" , UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = [
"Summary of the text.",
"Another summary.",
]
__lowerCamelCase : int = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="max_length" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCamelCase__ ( self : str ):
# safety check on max_len default value so we are sure the test works
__lowerCamelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__lowerCamelCase : Any = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Tuple = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
__lowerCamelCase : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Dict = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
__lowerCamelCase : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__lowerCamelCase : Tuple = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Optional[int] = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Any = json.load(UpperCAmelCase )
__lowerCamelCase : Tuple = [F"""<extra_id_{i}>""" for i in range(125 )]
__lowerCamelCase : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
__lowerCamelCase : Any = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCamelCase : List[Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCAmelCase )]
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def lowerCamelCase__ ( self : List[str] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__lowerCamelCase : List[str] = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
__lowerCamelCase : Any = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
| 135
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Dataclass field whose default is a (mutable) list."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """Arguments controlling the (deprecated) benchmarking utilities."""
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
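
# Illustrative sketch (an addition): parsing the dataclass above from the command line
# with HfArgumentParser, mirroring how the benchmark scripts consume it.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(BenchmarkArguments)
    (benchmark_args,) = parser.parse_args_into_dataclasses()
    print(benchmark_args.to_json_string())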
| 364
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize a dataset row and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
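
# Small self-contained illustration (an addition) of the characters-per-token ratio that
# `tokenize` above records; "gpt2" is an arbitrary example checkpoint.
if __name__ == "__main__":
    example_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    example_content = "def add(a, b):\n    return a + b\n"
    example_ids = example_tokenizer(example_content, truncation=False)["input_ids"]
    print(len(example_content) / len(example_ids))  # higher ratio = fewer tokens per character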
| 121
| 0
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
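
# Usage sketch (an addition): character-level tokenization with the class above; the
# checkpoint comes from PRETRAINED_VOCAB_FILES_MAP and loading details may differ.
if __name__ == "__main__":
    tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    print(tok.tokenize("abc12"))  # one token per character: ['a', 'b', 'c', '1', '2']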
| 38
|
"""simple docstring"""
from math import factorial
A_ = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
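    # Quick sanity check (an addition): 145 is a known curious number, 1! + 4! + 5! = 145.
    assert sum_of_digit_factorial(145) == 145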
| 64
| 0
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()

    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowercase : Union[str, Any] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
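    # Illustration (an addition) of the expansion `create_dummy_object` performs, e.g.
    #     print(DUMMY_CLASS.format("UNet2DModel", '["torch"]'))
    # prints a DummyObject class whose constructors all raise unless torch is installed.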
| 86
|
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
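    # Worked example (an addition): 10,000 principal at a 5% nominal annual rate,
    # compounded yearly for 10 years, earns ~6288.95 in interest.
    print(compound_interest(10_000, 0.05, 10))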
| 86
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : str = logging.get_logger(__name__)
A__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Tuple = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
A__ : Optional[int] = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
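
# Usage sketch (an addition): the fast tokenizer defined above, loaded from the stock
# "gpt2" checkpoint.
if __name__ == "__main__":
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    print(tok("Hello world")["input_ids"])  # [15496, 995]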
| 103
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean ``mu`` and standard deviation ``sigma``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
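    # Sanity check (an addition): a standard normal density peaks at 1 / sqrt(2 * pi) ≈ 0.3989.
    print(gaussian(0.0))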
| 215
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = None ) -> Tuple:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
lowerCAmelCase__ : Optional[Any] = nums[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : int = nums[i]
lowerCAmelCase__ : Any = max(SCREAMING_SNAKE_CASE_ , ans + num , SCREAMING_SNAKE_CASE_ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase__ = int(input("""Enter number of elements : """).strip())
lowerCamelCase__ = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
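    # Classic example (an addition): the best contiguous slice of
    # [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6.
    print(max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6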
| 360
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
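    # Round-trip example (an addition): 'H' is 0x48 and 'F' is 0x46.
    print(base16_encode(b"HF"))   # 4846
    print(base16_decode("4846"))  # b'HF'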
| 307
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='git_vision_model'
def __init__( self : Dict , a : Optional[Any]=768 , a : Union[str, Any]=3072 , a : Tuple=12 , a : int=12 , a : List[str]=3 , a : str=224 , a : List[str]=16 , a : List[Any]="quick_gelu" , a : Any=1e-5 , a : str=0.0 , a : Optional[int]=0.02 , **a : Optional[int] , ) -> Any:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
@classmethod
def __UpperCamelCase ( cls : int , a : Union[str, os.PathLike] , **a : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(a )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
SCREAMING_SNAKE_CASE : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a , **a )
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='git'
def __init__( self : Optional[Any] , a : Optional[Any]=None , a : str=3_0522 , a : Dict=768 , a : int=6 , a : Optional[int]=12 , a : List[str]=3072 , a : int="gelu" , a : List[str]=0.1 , a : int=0.1 , a : List[Any]=1024 , a : Union[str, Any]=0.02 , a : Dict=1e-12 , a : Any=0 , a : Any="absolute" , a : List[Any]=True , a : Optional[Any]=False , a : Any=101 , a : Union[str, Any]=102 , a : Optional[Any]=None , **a : Any , ) -> str:
"""simple docstring"""
super().__init__(bos_token_id=a , eos_token_id=a , pad_token_id=a , **a )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
SCREAMING_SNAKE_CASE : Optional[Any] = GitVisionConfig(**a )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = tie_word_embeddings
SCREAMING_SNAKE_CASE : Tuple = num_image_with_embedding
SCREAMING_SNAKE_CASE : int = bos_token_id
SCREAMING_SNAKE_CASE : Any = eos_token_id
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Tuple = self.__class__.model_type
return output
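
# Usage sketch (an addition): building a default GIT configuration; `GitConfig` is the
# upstream name of the second class above, imported here since the local names are obfuscated.
if __name__ == "__main__":
    from transformers import GitConfig

    config = GitConfig()
    print(config.vision_config.hidden_size)  # 768 by default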
| 76
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='vit_msn'
def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[str] = qkv_bias
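
# Usage sketch (an addition): a default ViT-MSN configuration; `ViTMSNConfig` is the
# upstream name of the class above, imported here since the local names are obfuscated.
if __name__ == "__main__":
    from transformers import ViTMSNConfig

    config = ViTMSNConfig()
    print(config.hidden_size, config.image_size, config.patch_size)  # 768 224 16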
| 76
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4096,
"allenai/longformer-large-4096": 4096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase="replace", lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", lowerCamelCase=False, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else bos_token
_lowercase : int = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else eos_token
_lowercase : Optional[int] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else sep_token
_lowercase : Union[str, Any] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else cls_token
_lowercase : Dict = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else unk_token
_lowercase : List[Any] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowercase : Dict = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else mask_token
super().__init__(
errors=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, add_prefix_space=lowerCamelCase, **lowerCamelCase, )
with open(lowerCamelCase, encoding='utf-8') as vocab_handle:
_lowercase : List[Any] = json.load(lowerCamelCase)
_lowercase : Dict = {v: k for k, v in self.encoder.items()}
_lowercase : List[str] = errors # how to handle errors in decoding
_lowercase : List[str] = bytes_to_unicode()
_lowercase : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase, encoding='utf-8') as merges_handle:
_lowercase : Union[str, Any] = merges_handle.read().split('\n')[1:-1]
_lowercase : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
_lowercase : str = dict(zip(lowerCamelCase, range(len(lowerCamelCase))))
_lowercase : Optional[int] = {}
_lowercase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowercase : Tuple = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return len(self.encoder)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder)
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowercase : List[str] = tuple(lowerCamelCase)
_lowercase : Optional[Any] = get_pairs(lowerCamelCase)
if not pairs:
return token
while True:
_lowercase : List[Any] = min(lowerCamelCase, key=lambda lowerCamelCase: self.bpe_ranks.get(lowerCamelCase, float('inf')))
if bigram not in self.bpe_ranks:
break
_lowercase , _lowercase : Any = bigram
_lowercase : int = []
_lowercase : Optional[Any] = 0
while i < len(lowerCamelCase):
try:
_lowercase : str = word.index(lowerCamelCase, lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowercase : int = j
if word[i] == first and i < len(lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowercase : Union[str, Any] = tuple(lowerCamelCase)
_lowercase : List[Any] = new_word
if len(lowerCamelCase) == 1:
break
else:
_lowercase : str = get_pairs(lowerCamelCase)
_lowercase : Dict = ' '.join(lowerCamelCase)
_lowercase : Optional[int] = word
return word
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = []
for token in re.findall(self.pat, lowerCamelCase):
_lowercase : str = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase).split(' '))
return bpe_tokens
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token))
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
return self.decoder.get(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Tuple = ''.join(lowerCamelCase)
_lowercase : Dict = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Dict = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_lowercase : str = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCamelCase, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCamelCase, ensure_ascii=lowerCamelCase) + '\n')
_lowercase : Dict = 0
with open(lowerCamelCase, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
_lowercase : List[str] = token_index
writer.write(' '.join(lowerCamelCase) + '\n')
index += 1
return vocab_file, merge_file
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : List[str] = [self.cls_token_id]
_lowercase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase)) + [1]
return [1] + ([0] * len(lowerCamelCase)) + [1, 1] + ([0] * len(lowerCamelCase)) + [1]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : Any = [self.sep_token_id]
_lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=False, **lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase) > 0 and not text[0].isspace()):
_lowercase : Optional[int] = ' ' + text
return (text, kwargs)
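
# Usage sketch (an addition): byte-level BPE tokenization with the tokenizer defined
# above, loaded from the stock checkpoint.
if __name__ == "__main__":
    tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    print(tok.tokenize("Hello world"))  # ['Hello', 'Ġworld']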
| 84
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : str = logging.getLogger()
def UpperCamelCase_( ) -> Any:
_lowercase : int = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowercase : Optional[Any] = parser.parse_args()
return args.f
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> None:
"""simple docstring"""
_lowercase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, 'run_glue_deebert.py')
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase, 0.6_6_6)
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
| 84
| 1
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
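    # Examples (an addition) for the converter above:
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString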
| 60
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ShapEPipeline
__snake_case = ['''prompt''']
__snake_case = ['''prompt''']
__snake_case = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return 8
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
a = PriorTransformer(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__UpperCAmelCase )
return model
def __lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
a = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=0 ) ->Optional[int]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
a = pipe(
'''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0
| 0
|
"""simple docstring"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Any = multiprocessing.Manager()
lowercase__ : Dict = manager.list()
lowercase__ : Union[str, Any] = multiprocessing.Process(target=__lowerCamelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowercase__ : List[str] = shutil.rmtree
lowercase__ : Optional[Any] = os.rmdir
lowercase__ : Union[str, Any] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowercase__ : int = {}
with swallow_io():
with time_limit(__lowerCamelCase ):
exec(__lowerCamelCase , __lowerCamelCase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
lowercase__ : Optional[Any] = rmtree
lowercase__ : str = rmdir
lowercase__ : str = chdir
@contextlib.contextmanager
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
def signal_handler(__lowerCamelCase , __lowerCamelCase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowerCamelCase )
signal.signal(signal.SIGALRM , __lowerCamelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowerCamelCase ):
with contextlib.redirect_stderr(__lowerCamelCase ):
with redirect_stdin(__lowerCamelCase ):
yield
@contextlib.contextmanager
def __UpperCAmelCase ( ) -> List[Any]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowerCamelCase ):
yield dirname
class __A ( A_ ):
'''simple docstring'''
pass
class __A ( io.StringIO ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict ,*_snake_case : int ,**_snake_case : List[Any] ) -> str:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : Any ,*_snake_case : Tuple ,**_snake_case : Dict ) -> Any:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : Dict ,*_snake_case : Dict ,**_snake_case : str ) -> List[str]:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : int ,*_snake_case : str ,**_snake_case : str ) -> int:
"""simple docstring"""
return False
class __A ( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
lowerCAmelCase : List[Any] = "stdin"
@contextlib.contextmanager
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
if root == ".":
yield
return
lowercase__ : List[Any] = os.getcwd()
os.chdir(__lowerCamelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase=None ) -> Optional[int]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowercase__ : List[str] = None
lowercase__ : Tuple = None
import os
lowercase__ : List[str] = '''1'''
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
lowercase__ : Optional[Any] = None
lowercase__ : List[str] = None
lowercase__ : str = None
lowercase__ : str = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[Any] = None
lowercase__ : Tuple = None
lowercase__ : Tuple = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
lowercase__ : Any = None
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
lowercase__ : str = None
lowercase__ : List[Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : Any = None
lowercase__ : Tuple = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
lowercase__ : List[str] = None
import shutil
lowercase__ : List[Any] = None
lowercase__ : List[Any] = None
lowercase__ : Tuple = None
import subprocess
lowercase__ : Any = None # type: ignore
lowercase__ : int = None
import sys
lowercase__ : str = None
lowercase__ : Tuple = None
lowercase__ : int = None
lowercase__ : Optional[Any] = None
lowercase__ : Optional[Any] = None
| 365
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
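    # Note (an addition): with the _LazyModule indirection above, heavy submodule imports
    # are deferred until first attribute access, e.g. accessing
    # `transformers.models.efficientnet.EfficientNetConfig` (module path assumed) only
    # then imports `configuration_efficientnet`.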
| 302
| 0
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Dict = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : str = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : List[Any] = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ : Union[str, Any] = '3.0.12'
lowerCAmelCase_ : Any = None
def logger():
    """Return the logger instance used throughout this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Helper returned by :meth:`BaseFileLock.acquire` so it can be used as a context manager."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value, in seconds."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True, if the object currently holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(path))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
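# Hedged usage sketch (ours): FileLock is re-entrant and usable as a context
# manager; "app.lock" is an illustrative path, not from the original module.
#
#   lock = FileLock("app.lock", timeout=5)
#   with lock:  # raises Timeout if not acquired within 5 seconds
#       pass    # critical section, exclusive across processes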
| 63
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DETA model."""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900, max_position_embeddings=2048,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8,
        encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False,
        position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4,
        two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True,
        class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
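# Hedged usage sketch (ours): the keyword values below are illustrative, but
# the aliasing shown is how PretrainedConfig attribute_map works.
#
#   config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#   assert config.hidden_size == config.d_model  # aliased via attribute_map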
| 63
| 1
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename Flax parameter keys/tensors to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 359
|
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the hyperbolic tangent via its rational exponential form."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
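# Quick check (ours): the rational form above equals np.tanh element-wise:
#   np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0]))  # True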
| 108
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
http_head("https://huggingface.co" )
| 52
|
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
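# Note (ours): the float cube-root test above can misreport very large n due
# to rounding. An integer-exact variant (hypothetical name) would be:
#
#   def perfect_cube_exact(n: int) -> bool:
#       val = round(abs(n) ** (1 / 3))
#       return val**3 == abs(n)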
| 63
| 0
|
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    """Minimal pipeline that runs one UNet forward pass and one scheduler step."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
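# Hedged usage sketch (ours); the UNet sizes are illustrative only:
#
#   from diffusers import DDPMScheduler, UNet2DModel
#   unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
#   pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=DDPMScheduler())
#   out = pipe()  # tensor of ones with the sample's shape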
| 371
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
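# Usage note (ours): dep_version_check("numpy") re-checks the pinned range from
# dependency_versions_table at call time and raises if the installed version
# (or a missing package) violates it; `hint` is appended to the error message.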
| 65
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 305
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph BPE tokenizer for GPT-2, backed by keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
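# Hedged usage sketch (ours): building the in-graph tokenizer from the
# standard "gpt2" checkpoint and calling it on a batch of strings.
#
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   outputs = tf_tokenizer(tf.constant(["hello world"]))
#   outputs["input_ids"], outputs["attention_mask"]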
| 305
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 92
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
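# Quick check (ours): both variants agree, e.g.
#   euclidean_gcd(12, 18) == euclidean_gcd_recursive(12, 18) == 6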
| 92
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 177
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
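# Worked example (ours): L = 10 mH and C = 100 nF give
#   f = 1 / (2 * pi * sqrt(0.01 * 1e-7)) ≈ 5032.9 Hz
#   resonant_frequency(0.01, 1e-7)  # -> ('Resonant frequency', 5032.92...)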
| 177
| 1
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
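# Worked example (ours): silicon at 300 K with N_d = N_a = 1e17 cm^-3 and
# n_i = 1.5e10 cm^-3 gives V_bi = (kT/q) * ln(N_d * N_a / n_i^2) ≈ 0.81 V:
#   builtin_voltage(1e17, 1e17, 1.5e10)  # ≈ 0.81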
| 365
|
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Note: the digit-mangled source did not distinguish the two molar masses, so
# the sqrt(molar_mass_2 / molar_mass_1) orientation below is a reconstruction.
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
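# Worked example (ours), under the sqrt(molar_mass_2 / molar_mass_1)
# convention chosen above, for hydrogen (2.016 g/mol) vs helium (4.002 g/mol):
#   effusion_ratio(2.016, 4.002)  # -> 1.408943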
| 245
| 0
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching input_string against
    pattern, where '.' matches any single character and 'c*' matches zero or
    more occurrences of the preceding character c.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
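# Additional illustrative cases (ours, standard regex-DP examples):
#   match_pattern("ab", ".*")                   # True
#   match_pattern("mississippi", "mis*is*p*.")  # False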
| 77
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Calculate beta = v / c for a given velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz boost to a four-vector (symbolic if no event is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
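# Quick check (ours): at v = c / 2, beta = 0.5 and gamma = 1 / sqrt(0.75):
#   beta(c / 2)   # 0.5
#   gamma(c / 2)  # ≈ 1.1547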
| 307
| 0
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation,
    # then join everything together and return it.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
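# Round-trip example (ours):
#   base16_encode(b"Hello")      # '48656C6C6F'
#   base16_decode("48656C6C6F")  # b'Hello'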
| 141
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
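# Example (ours): tied counts are all returned, sorted ascending:
#   mode([2, 3, 4, 5, 3, 4])  # [3, 4]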
| 141
| 1
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition (XOR is symmetric, so this mirrors encrypt)
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 125
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length")
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 125
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX,
    as the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
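
# A short usage sketch (editor's illustration, assuming `transformers` is installed):
#
#     config = GPTNeoConfig()
#     print(config.attention_layers[:4])  # ['global', 'local', 'global', 'local']
#     print(config.num_hidden_layers)     # 24, resolved via attribute_map -> num_layers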
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
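    # Illustrative check on a fixed array (editor's addition; values are hypothetical):
    # the O(n^2) two-pointer scan agrees with the brute-force permutation search.
    assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
    assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)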
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
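
# A usage sketch (editor's illustration; the class name above is reconstructed,
# and PIL is assumed to be installed):
if __name__ == "__main__":
    from PIL import Image

    image = Image.new("RGB", (320, 256))
    processor = CLIPImageProcessor()
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)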
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
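
# A usage sketch (editor's illustration; the class name above is reconstructed,
# and PIL is assumed to be installed):
if __name__ == "__main__":
    from PIL import Image

    frames = [Image.new("RGB", (320, 256)) for _ in range(8)]
    processor = VideoMAEImageProcessor()
    batch = processor(frames, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)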
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
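    # A few additional illustrative checks (editor's addition; expected results
    # follow from the regex above):
    print(indian_phone_validator("9876543210"))  # True: bare 10-digit number starting with 9
    print(indian_phone_validator("+91 1234567890"))  # False: first subscriber digit must be 7, 8 or 9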
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Counts the square laminae (hollow square arrangements) that can be formed
    using up to `limit` tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
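
# A brute-force cross-check for small limits (editor's sketch; Project Euler 173
# states that exactly 41 laminae can be formed using up to one hundred tiles):
def solution_brute_force(limit: int = 100) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # the thinnest lamina of this outer width must fit
        hole = outer - 2
        while hole >= 1 and outer**2 - hole**2 <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


assert solution(100) == solution_brute_force(100) == 41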
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
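
# A short usage sketch (editor's illustration, assuming `transformers` is installed):
if __name__ == "__main__":
    config = VisualBertConfig()
    print(config.hidden_size, config.visual_embedding_dim)  # 768 512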
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
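
# Hypothetical command-line invocation (editor's illustration; script name and
# paths are placeholders, flags are those defined by the parser above):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention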
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
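
# Hypothetical command-line invocation (editor's illustration; script name and
# paths are placeholders, flags are those defined by the parser above):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path /path/to/xlm/best-valid_mlm_ppl.pth \
#       --pytorch_dump_folder_path ./xlm-converted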
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculates the date of Easter for the given year, following Gauss's method.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
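
# For reference (editor's note): the Western Easter dates printed above should be
# 1994-04-03, 2000-04-23, 2010-04-04, 2021-04-04, and 2023-04-09.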