import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
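
# A minimal usage sketch of the two-stage flow the integration test above exercises
# (prior -> decoder inpaint). The checkpoint ids, image URL and mask convention come
# from the test itself; the function name is illustrative and running this downloads
# the full fp16 weights, so it assumes a CUDA machine.
def kandinsky_inpaint_example():
    import numpy as np
    import torch
    from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
    from diffusers.utils import load_image

    pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipeline = KandinskyV22InpaintPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
    ).to("cuda")

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
    )
    mask = np.ones((768, 768), dtype=np.float32)  # same mask convention as the tests above
    mask[:250, 250:-250] = 0

    # stage 1: text prompt -> image embeddings; stage 2: embeddings + mask -> inpainted image
    image_emb, zero_image_emb = pipe_prior("a hat", negative_prompt="").to_tuple()
    return pipeline(
        image=init_image,
        mask_image=mask,
        image_embeds=image_emb,
        negative_image_embeds=zero_image_emb,
        height=768,
        width=768,
        output_type="np",
    ).images[0]
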
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
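
# Invocation sketch for the conversion script above (the flag names come from the
# argparse definition; the script filename and both paths are placeholders):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir
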
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew  and
      Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
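
# For reference, the score returned by compute_bleu combines the per-order n-gram
# precisions p_n with a brevity penalty: BLEU = BP * exp(sum_n w_n * log p_n).
# A minimal standalone sketch of that combination (uniform weights, no smoothing;
# the function name is illustrative, not part of the metric):
import math


def bleu_from_precisions(precisions: list, translation_length: int, reference_length: int) -> float:
    if min(precisions) == 0:  # any n-gram order with zero matches zeroes the geometric mean
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    # the brevity penalty punishes candidates shorter than the reference
    bp = 1.0 if translation_length > reference_length else math.exp(1 - reference_length / translation_length)
    return bp * geo_mean
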
def sylvester(number: int) -> int:
    """
    Calculate the ``number``-th term of Sylvester's sequence, defined by
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
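
# Quick check of the recurrence a(n) = a(n-1)^2 - a(n-1) + 1, whose first terms are
# 2, 3, 7, 43, 1807, ...
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]
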
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Single ByT5 ids are not always valid UTF-8 (byte 128 for instance), so build a
        # clean ASCII-only sequence instead of relying on the common-test default.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because
    # this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
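
# The expected ids in the tests above follow ByT5's byte-level scheme: each UTF-8
# byte b maps to id b + 3 (ids 0..2 are reserved for pad/</s>/unk) and </s> (id 1)
# is appended. A minimal sketch (the helper name is illustrative, not library API):
def byt5_like_encode(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]


assert byt5_like_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
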
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
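
# Conceptually, pad_across_processes grows every process's tensor along dim 0 to the
# largest size found on any process, filling with 0 either at the end or, with
# pad_first=True, at the front. A pure-Python sketch of that rule for one dimension
# (function name is illustrative, not accelerate API):
def pad_to_max(values: list, max_len: int, pad_index=0, pad_first: bool = False) -> list:
    padding = [pad_index] * (max_len - len(values))
    return padding + values if pad_first else values + padding


assert pad_to_max([1, 2], 4) == [1, 2, 0, 0]
assert pad_to_max([1, 2], 4, pad_first=True) == [0, 0, 1, 2]
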
"""
Project Euler Problem 46 (Goldbach's other conjecture): find the smallest odd
composite number that cannot be written as the sum of a prime and twice a square.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of: 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers that violate the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
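
# A worked check of the machinery above: 33 is an odd composite that does satisfy the
# conjecture, since 33 = 31 + 2 * 1^2 with 31 prime. The published answer to Problem 46
# is 5777, the smallest odd composite with no such decomposition.
assert any(is_prime(33 - 2 * i * i) for i in range(1, 5))
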
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
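
# The three full-loop tests above repeat one canonical pattern. A minimal standalone
# sketch of the diffusers scheduler denoising loop they exercise (the function name is
# illustrative, not library API):
def run_denoising_loop(scheduler, model, sample, num_inference_steps: int):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma  # scale the initial noise
    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample, t)  # per-step input scaling
        model_output = model(sample, t)  # predict noise (or v) at timestep t
        sample = scheduler.step(model_output, t, sample).prev_sample  # one denoising step
    return sample
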
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        DeformableDetrImageProcessor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
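
# A minimal sketch of the shortest-edge resize rule that get_expected_values mirrors:
# scale the image so its shorter side equals size["shortest_edge"], preserving aspect
# ratio (the longest_edge cap is ignored here, as it is in the tester; the function
# name is illustrative):
def shortest_edge_resize(h: int, w: int, shortest_edge: int) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # portrait: width is the short side
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)  # landscape: height is the short side
    return shortest_edge, shortest_edge  # square image


# matches the (800, 1066) shape checked in the COCO tests above for a 480x640 image
assert shortest_edge_resize(480, 640, 800) == (800, 1066)
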
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
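# Note: the class-level mapping a few lines above ({"dropout": "classifier_dropout",
# "num_classes": "num_labels"}) is HF's standard `attribute_map` mechanism —
# assuming the usual PretrainedConfig behavior, reading `config.dropout`
# transparently returns `config.classifier_dropout`, and `config.num_classes`
# aliases `config.num_labels`.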
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                "You need to install SentencePiece to use this tokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
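        # Worked example of the resulting alignment: spm id 3 ("," above) maps
        # to embedding id 3 + 12 = 15, consistent with the table — ids 0-4 hold
        # the special tokens and ids 5-14 hold [unused0]..[unused9].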
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                "You need to install SentencePiece to use this tokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
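# Illustration of the special-token layout produced by the methods above (a
# sketch with placeholder ids): a single sequence becomes `tokens_a + [SEP]`,
# a pair becomes `tokens_a + [SEP] + tokens_b + [SEP]`, `get_special_tokens_mask`
# marks exactly those [SEP] slots with 1, and the token-type ids are all zeros.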
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
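# The structure above is transformers' lazy-import pattern: at import time only
# the names in `_import_structure` are registered through `_LazyModule`, and the
# heavy torch/tf/flax modeling files are imported on first attribute access.
# The `TYPE_CHECKING` branch mirrors the same imports eagerly so static type
# checkers and IDEs can resolve them.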
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class lowercase ( Generic[T] ):
def __init__( self : Any , _UpperCamelCase : T ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return F"{self.data}"
class lowercase ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
def __iter__( self : str ) -> Iterator[T]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.top
while node:
yield node.data
SCREAMING_SNAKE_CASE = node.next
def __str__( self : int ) -> str:
'''simple docstring'''
return "->".join([str(_UpperCamelCase ) for item in self] )
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __snake_case( self : Union[str, Any] ) -> bool:
'''simple docstring'''
return self.top is None
def __snake_case( self : str , _UpperCamelCase : T ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Node(_UpperCamelCase )
if not self.is_empty():
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = node
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = self.top.next
return pop_node.data
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __snake_case( self : Dict ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
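# Usage sketch with the original (pre-obfuscation) names Stack/push/pop/peek:
#   stack: Stack[int] = Stack()
#   stack.push(5); stack.push(9)
#   stack.peek()      # -> 9
#   stack.pop()       # -> 9, leaving len(stack) == 1
#   stack.is_empty()  # -> False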
if __name__ == "__main__":
from doctest import testmod
testmod()
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowerCamelCase : List[Any] = Lock()
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ):
global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE: the hard-coded 1_0 rounds assume the 10-element demo list built in
    # main(); for arbitrary input this should be the length of the array
    for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCAmelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ , UpperCAmelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCAmelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , UpperCAmelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCAmelCase__ )
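# A tiny trace of the compare-exchange schedule above for [4, 3, 2, 1]
# (even phases pair indices (0,1),(2,3); odd phases pair (1,2)):
#   phase 0: [3, 4, 1, 2]    phase 1: [3, 1, 4, 2]
#   phase 2: [1, 3, 2, 4]    phase 3: [1, 2, 3, 4]  -> sorted after n phases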
def __lowerCamelCase (UpperCAmelCase__ : List[Any] ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
SCREAMING_SNAKE_CASE = Pipe()
SCREAMING_SNAKE_CASE = Pipe()
process_array_.append(
Process(
target=UpperCAmelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
SCREAMING_SNAKE_CASE = temp_rs
SCREAMING_SNAKE_CASE = temp_rr
for i in range(1 , len(UpperCAmelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE = Pipe()
SCREAMING_SNAKE_CASE = Pipe()
process_array_.append(
Process(
target=UpperCAmelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
SCREAMING_SNAKE_CASE = temp_rs
SCREAMING_SNAKE_CASE = temp_rr
process_array_.append(
Process(
target=UpperCAmelCase__ , args=(
len(UpperCAmelCase__ ) - 1,
arr[len(UpperCAmelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCAmelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = list(range(1_0 , 0 , -1 ) )
print("Initial List" )
print(*UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = odd_even_transposition(UpperCAmelCase__ )
print("Sorted List\n" )
print(*UpperCAmelCase__ )
if __name__ == "__main__":
main()
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase : List[Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase :
def __init__( self : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=100 , _UpperCamelCase : Any=13 , _UpperCamelCase : Optional[Any]=30 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : str=True , _UpperCamelCase : Any=32 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Tuple=37 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Any=10 , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : int=3 , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=[0, 1, 2, 3] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = 100
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = out_indices
SCREAMING_SNAKE_CASE = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __snake_case( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForMaskedImageModeling(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __snake_case( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = BeitForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = BeitForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = BeitForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __snake_case( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Dict = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ : List[Any] = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : str = False
lowercase__ : int = False
lowercase__ : Any = False
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : List[str] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCamelCase ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase ).loss
loss.backward()
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase ).loss
loss.backward()
def __snake_case( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = BeitModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).pixel_values.to(_UpperCamelCase )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE = torch.ones((1, 196) , dtype=torch.bool ).to(_UpperCamelCase )
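        # 196 = (224 / 16) ** 2 patches for this checkpoint; masking all of
        # them means every position contributes to the MIM logits checked below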
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(pixel_values=_UpperCamelCase , bool_masked_pos=_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -13.9_174], [-3.2_4_5_6, 0.4_9_4_8, -13.9_401], [-3.2_0_3_3, 0.5_1_2_1, -13.8_550]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCamelCase , atol=1e-2 ) )
@slow
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE = 281
self.assertEqual(logits.argmax(-1 ).item() , _UpperCamelCase )
@slow
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _UpperCamelCase )
@slow
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE = model.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = BeitImageProcessor(do_resize=_UpperCamelCase , size=640 , do_center_crop=_UpperCamelCase )
SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=_UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=_UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def __snake_case( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE = model.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = BeitImageProcessor(do_resize=_UpperCamelCase , size=640 , do_center_crop=_UpperCamelCase )
SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
def __lowerCamelCase (numbers : list[int] ):
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
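# Minimal sanity checks for the routine above, using two classic examples
# (the module-level name `__lowerCamelCase` is simply this dump's obfuscated
# alias for the max-subarray-product function):
if __name__ == "__main__":
    assert __lowerCamelCase([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert __lowerCamelCase([-2, 0, -1]) == 0  # zero beats any negative product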
import math
def __lowerCamelCase (UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = [True] * n
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
SCREAMING_SNAKE_CASE = i * 2
while index < n:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = index + i
SCREAMING_SNAKE_CASE = [2]
for i in range(3 , UpperCAmelCase__ , 2 ):
if is_prime[i]:
primes.append(UpperCAmelCase__ )
return primes
def __lowerCamelCase (UpperCAmelCase__ : int = 9_9_9_9_6_6_6_6_3_3_3_3 ):
SCREAMING_SNAKE_CASE = math.floor(math.sqrt(UpperCAmelCase__ ) ) + 1_0_0
SCREAMING_SNAKE_CASE = prime_sieve(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = primes[prime_index]
while (last_prime**2) <= limit:
SCREAMING_SNAKE_CASE = primes[prime_index + 1]
SCREAMING_SNAKE_CASE = last_prime**2
SCREAMING_SNAKE_CASE = next_prime**2
# Get numbers divisible by lps(current)
SCREAMING_SNAKE_CASE = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
SCREAMING_SNAKE_CASE = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
SCREAMING_SNAKE_CASE = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
SCREAMING_SNAKE_CASE = next_prime
prime_index += 1
return matches_sum
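# Context (a sketch): this appears to solve the "semidivisible numbers" problem
# (Project Euler 234). For consecutive primes p < q straddling sqrt(n), it sums
# the numbers in (p**2, q**2] divisible by p or by q, then subtracts twice any
# number divisible by both, leaving the sum of numbers divisible by exactly one.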
if __name__ == "__main__":
print(solution())
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowerCamelCase : str = threading.Lock()
_lowerCamelCase : Optional[logging.Handler] = None
_lowerCamelCase : Any = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_lowerCamelCase : Union[str, Any] = logging.WARNING
_lowerCamelCase : List[Any] = True
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_VERBOSITY" , UpperCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def __lowerCamelCase ():
return __name__.split("." )[0]
def __lowerCamelCase ():
return logging.getLogger(_get_library_name() )
def __lowerCamelCase ():
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ():
return log_levels
def __lowerCamelCase (UpperCAmelCase__ : Optional[str] = None ):
if name is None:
SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __lowerCamelCase (UpperCAmelCase__ : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase (self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ):
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , UpperCAmelCase__ )
if no_advisory_warnings:
return
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : str = warning_advice
@functools.lru_cache(UpperCAmelCase__ )
def __lowerCamelCase (self : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ):
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : Dict = warning_once
class lowercase :
def __init__( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : str ) -> List[Any]: # pylint: disable=unused-argument
'''simple docstring'''
SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self : Optional[Any] ) -> str:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[str] , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
def empty_fn(*_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
return self
def __exit__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return
class lowercase :
def __call__( self : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_UpperCamelCase , **_UpperCamelCase )
else:
return EmptyTqdm(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Dict , *_UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCamelCase : Union[str, Any] = _tqdm_cls()
def __lowerCamelCase ():
global _tqdm_active
return bool(_tqdm_active )
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
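# Usage sketch with the module's public (pre-obfuscation) names:
#   logger = get_logger(__name__)        # child of the library root logger
#   set_verbosity(log_levels["info"])    # or the set_verbosity_info() shortcut
#   logger.warning_once("emitted once")  # backed by functools.lru_cache above
#   disable_progress_bar()               # flips _tqdm_active and hf_hub bars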
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : Dict=3 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Dict=224 , _UpperCamelCase : Optional[Any]=1_000 , _UpperCamelCase : Optional[Any]=[3, 3, 6, 4] , _UpperCamelCase : Union[str, Any]=[48, 56, 112, 220] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCamelCase , layer_scale_init_value=1e-5 , )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __snake_case( self : int , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase__ : str = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Optional[Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : Any = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __snake_case( self : str ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def __snake_case( self : List[str] ) -> Dict:
'''simple docstring'''
pass
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_UpperCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
def _config_zero_init(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(_UpperCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_UpperCamelCase , _UpperCamelCase , 1e-10 )
if isinstance(getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(_UpperCamelCase , _UpperCamelCase ) )
setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class lowercase ( unittest.TestCase ):
@cached_property
    def resolver( self ) -> List[Any]:
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self ) -> List[Any]:
        '''simple docstring'''
        self.resolver.convert_models(["heb-eng"] )
@slow
    def test_model_card( self ) -> Optional[int]:
        '''simple docstring'''
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
import argparse
JS_PATH = '''docs/source/_static/js/custom.js'''
def update_custom_js(version ):
    with open(JS_PATH , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion =" ):
        index += 1
    lines[index] = F"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("}" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"    \"v{version}\": \"v{version}\",\n"
    with open(JS_PATH , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
    update_custom_js(args.version)
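# For reference, the custom.js fragment rewritten above looks roughly like the
# following (reconstructed from the startswith() anchors; the exact contents of
# the real file may differ):
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "v4.30.0 (stable)",
#       "v4.29.0": "v4.29.0",
#   }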
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ) -> Union[str, Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
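        # e.g. with the default image_size=64 above: (64 // 32) ** 2 = 4 patches,
        # so seq_length = 4 + 1 = 5 once the [CLS] token is added.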
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
        '''simple docstring'''
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        '''simple docstring'''
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> Optional[Any]:
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ) -> Optional[int]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Optional[int]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ) -> List[str]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        '''simple docstring'''
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> Any:
        '''simple docstring'''
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head( self ) -> str:
        '''simple docstring'''
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ) -> Tuple:
        '''simple docstring'''
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class lowercase :
lowercase__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ : bool = field(default=a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowercase :
lowercase__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
lowercase__ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ : bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
    module = import_module("tasks" )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
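    # Shape walk-through for align_predictions: predictions has shape
    # (batch_size, seq_len, num_labels) and label_ids (batch_size, seq_len);
    # positions equal to nn.CrossEntropyLoss().ignore_index (-100 by default)
    # mark special/padded tokens and are dropped before mapping ids back to
    # label strings via label_map.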
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
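# Hedged CLI sketch, assuming this script is saved as run_ner.py (the checkpoint
# and paths below are placeholders, not taken from this file; the flags map to
# the dataclasses and TrainingArguments above):
#
#   python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --data_dir /path/to/conll2003 \
#       --labels /path/to/labels.txt \
#       --output_dir /tmp/ner-out \
#       --max_seq_length 128 \
#       --do_train --do_eval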
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box , width , height ):
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract(image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] = None ):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
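# Worked example for normalize_box: a Tesseract box [left, top, right, bottom]
# of [12, 34, 56, 78] on a 224 x 224 image becomes
# [int(1000 * 12 / 224), int(1000 * 34 / 224), int(1000 * 56 / 224), int(1000 * 78 / 224)]
# = [53, 151, 250, 348], i.e. coordinates on a resolution-independent 0-1000 grid.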
class lowercase ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , apply_ocr: bool = True , ocr_lang: Optional[str] = None , tesseract_config: Optional[str] = "" , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = (size["height"], size["width"])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , apply_ocr: bool = None , ocr_lang: Optional[str] = None , tesseract_config: Optional[str] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors )
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
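# Hedged usage sketch for the processor above (its original class name is
# obfuscated to `lowercase` in this dump; the API follows the LayoutLM-family
# image processors). apply_ocr=True needs pytesseract installed; treat the
# exact output keys as an assumption.
def _demo_document_processing(image ):
    image_processor = lowercase(apply_ocr=True )  # resizes to 224x224 by default
    encoding = image_processor(image , return_tensors="np" )
    # encoding["pixel_values"] holds resized, BGR-flipped images; with OCR on,
    # encoding["words"] and encoding["boxes"] carry the recognized words and
    # their 0-1000-normalized bounding boxes.
    return encoding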
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig( PretrainedConfig ):
    model_type = """roberta"""
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
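    # For reference, for the default (non multiple-choice) task the property
    # above resolves to:
    #   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
    #                ("attention_mask", {0: "batch", 1: "sequence"})])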
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> List[str]:
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ) -> List[Any]:
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
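    # e.g. with shortest_edge=18 and a single 30x40 (w x h) image: w < h, so
    # expected_width = 18 and expected_height = int(18 * 40 / 30) = 24.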
@require_torch
@require_vision
class DetaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ) -> str:
        '''simple docstring'''
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Tuple:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[Any]:
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "image_mean" ) )
        self.assertTrue(hasattr(image_processor , "image_std" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "do_rescale" ) )
        self.assertTrue(hasattr(image_processor , "do_pad" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> List[Any]:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
        self.assertEqual(image_processor.do_pad , True )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
    def test_call_pil( self ) -> int:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ) -> Optional[int]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ) -> Dict:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
def sylvester(number: int ) -> int:
    assert isinstance(number , int ), F"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = F"The input value of [n={number}] has to be > 0"
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class SequenceFeatureExtractor( FeatureExtractionMixin ):
    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad( self , processed_features: Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding: Union[bool, str, PaddingStrategy] = True , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        '''simple docstring'''
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    F"type of {first_element} unknown: {type(first_element )}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        '''simple docstring'''
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features
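    # Worked example for the right-padding branch above: with feature_size=1,
    # padding_value=0.0 and max_length=5, an input of length 3
    #   [0.1, 0.2, 0.3]          -> [0.1, 0.2, 0.3, 0.0, 0.0]
    #   attention_mask [1, 1, 1] -> [1, 1, 1, 0, 0]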
    def _truncate( self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , pad_to_multiple_of: Optional[int] = None , truncation: Optional[bool] = None , ) -> Optional[int]:
        '''simple docstring'''
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ):
        '''simple docstring'''
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name : str
    pip_package : str = None
    @staticmethod
    def is_available():
        '''simple docstring'''
        raise NotImplementedError
    def run( self , trainer , n_trials: int , direction: str , **kwargs ):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space( self , trial ):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available( self ):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
    @classmethod
    def pip_install( cls ):
        '''simple docstring'''
        return F"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend( HyperParamSearchBackendBase ):
    name = """optuna"""
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_optuna_available()
    def run( self , trainer , n_trials: int , direction: str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_optuna(trial )
class RayTuneBackend( HyperParamSearchBackendBase ):
    name = """ray"""
    pip_package = """'ray[tune]'"""
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_ray_available()
    def run( self , trainer , n_trials: int , direction: str , **kwargs ):
        '''simple docstring'''
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        '''simple docstring'''
        return default_hp_space_ray(trial )
class lowercase ( a ):
lowercase__ : Tuple = """sigopt"""
@staticmethod
def __snake_case( ) -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def __snake_case( self : Any , _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : str , **_UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
return run_hp_search_sigopt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(_UpperCamelCase )
class lowercase ( a ):
lowercase__ : Tuple = """wandb"""
@staticmethod
def __snake_case( ) -> Union[str, Any]:
'''simple docstring'''
return is_wandb_available()
def __snake_case( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : str , **_UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
return run_hp_search_wandb(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Dict ) -> str:
'''simple docstring'''
return default_hp_space_wandb(_UpperCamelCase )
_lowerCamelCase : List[Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(UpperCAmelCase__ ) > 0:
SCREAMING_SNAKE_CASE = available_backends[0].name
if len(UpperCAmelCase__ ) > 1:
logger.info(
F"{len(UpperCAmelCase__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
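# Hedged usage sketch (method and helper names taken from the upstream Trainer
# integration; `trainer` is a hypothetical transformers.Trainer instance):
#
#   name = default_hp_search_backend()                       # first installed backend
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
#   backend.ensure_available()                               # raises if the package is missing
#   best_run = backend.run(trainer, n_trials=10, direction="minimize")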
| 717
|
import functools
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ):
# Validation
    if not isinstance(UpperCAmelCase__ , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(UpperCAmelCase__ ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE = set(UpperCAmelCase__ )
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
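# Hedged worked example (upstream, this function is named `mincost_tickets`):
# for days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], a 7-day pass bought on
# day 1 covers days 1-7 (cost 7) and single-day tickets cover days 8 and 20
# (cost 2 each), so the minimum total is 11.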
| 647
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : int = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = BartphoTokenizer
lowercase__ : Dict = False
lowercase__ : Union[str, Any] = True
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE = ["▁This", "▁is", "▁a", "▁t", "est"]
SCREAMING_SNAKE_CASE = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
SCREAMING_SNAKE_CASE = BartphoTokenizer(_UpperCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case( self : str , **_UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "This is a là test"
SCREAMING_SNAKE_CASE = "This is a<unk><unk> test"
return input_text, output_text
def __snake_case( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BartphoTokenizer(_UpperCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = "This is a là test"
SCREAMING_SNAKE_CASE = "▁This ▁is ▁a ▁l à ▁t est".split()
SCREAMING_SNAKE_CASE = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
| 718
|
from __future__ import annotations
import math
def __lowerCamelCase (UpperCAmelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_lowerCamelCase : Tuple = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def __lowerCamelCase (UpperCAmelCase__ : int ):
    if not isinstance(UpperCAmelCase__ , int ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
SCREAMING_SNAKE_CASE = []
for num in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = 0
while 2 * i * i <= odd_composites[num]:
SCREAMING_SNAKE_CASE = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def __lowerCamelCase ():
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 647
| 0
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __lowerCamelCase (UpperCAmelCase__ : int ):
'''simple docstring'''
if hor == 1_2_8:
SCREAMING_SNAKE_CASE = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
SCREAMING_SNAKE_CASE = (3_2, 1_2_8, 2_5_6)
SCREAMING_SNAKE_CASE = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 3_2:
SCREAMING_SNAKE_CASE = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
SCREAMING_SNAKE_CASE = (3_2, 6_4, 1_2_8, 2_5_6)
SCREAMING_SNAKE_CASE = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
SCREAMING_SNAKE_CASE = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
SCREAMING_SNAKE_CASE = model.state_dict()
SCREAMING_SNAKE_CASE = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 1_4,
"out_channels": 1_4,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5_5_3_6,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
hf_value_function.load_state_dict(UpperCAmelCase__ )
torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
        json.dump(UpperCAmelCase__ , f )
def __lowerCamelCase ():
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"in_channels": 1_4,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (3_2, 6_4, 1_2_8, 2_5_6),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5_5_3_6,
"out_channels": 1_4,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
SCREAMING_SNAKE_CASE = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCAmelCase__ )
print(F"length of state dict: {len(state_dict.keys() )}" )
print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
hf_value_function.load_state_dict(UpperCAmelCase__ )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(UpperCAmelCase__ , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
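# Note on the key-mapping trick used above (added commentary): dict(zip(...)) over
# the two sets of state-dict keys only works because the original checkpoint and
# the freshly constructed diffusers model enumerate their parameters in exactly
# the same order; any architectural mismatch would silently pair unrelated tensors
# until load_state_dict() fails on a shape check.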
| 719
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 647
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase (UpperCAmelCase__ : Optional[int]=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCAmelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase__ )
return parser
def __lowerCamelCase (UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
SCREAMING_SNAKE_CASE = script_name
else:
SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
SCREAMING_SNAKE_CASE = execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = test_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
test_command(UpperCAmelCase__ )
if __name__ == "__main__":
main()
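# Typical invocation of this subcommand (config path illustrative):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml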
| 720
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ):
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ""
else:
SCREAMING_SNAKE_CASE = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = ["head.weight", "head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None )
def __lowerCamelCase (UpperCAmelCase__ : Any ):
    # The projection head is used during self-supervised pre-training in MSN;
    # it is not needed for downstream tasks.
SCREAMING_SNAKE_CASE = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = ViTMSNConfig()
SCREAMING_SNAKE_CASE = 1_0_0_0
SCREAMING_SNAKE_CASE = "datasets/huggingface/label-files"
SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 3_8_4
SCREAMING_SNAKE_CASE = 1_5_3_6
SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = ViTMSNModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["target_encoder"]
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , base_model=UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , base_model=UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
    SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=True ).raw )
    SCREAMING_SNAKE_CASE = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
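# Hedged CLI sketch (the script filename is an assumption):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small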
| 647
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
_lowerCamelCase : Union[str, Any] = '''1'''
_lowerCamelCase : Any = '''0'''
_lowerCamelCase : int = '''1'''
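# (Hedged note: the three bare string constants above stood for environment-variable
# assignments in the original benchmark script -- TensorRT engine-cache / precision
# switches set via os.environ -- whose variable names were lost in this dump.)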
_lowerCamelCase : List[str] = ort.SessionOptions()
_lowerCamelCase : List[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
_lowerCamelCase : str = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
_lowerCamelCase : Optional[int] = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
_lowerCamelCase : Optional[Any] = ort.RunOptions()
_lowerCamelCase : Dict = 1_28
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Any = np.ones((batch, sequence), dtype=np.intaa)
_lowerCamelCase : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
_lowerCamelCase : Dict = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : List[Any] = 20_00
_lowerCamelCase : Union[str, Any] = {}
for iter in range(max_iters):
_lowerCamelCase : Any = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 10_00 / max_iters))
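# A throughput figure one might derive at this point (hypothetical addition, not in
# the original script):
#
#   elapsed = time.time() - start_time
#   print("Throughput = {:.1f} inferences/s".format(max_iters / elapsed))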
| 721
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
            return super().get_special_tokens_mask(
                _UpperCamelCase , _UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
        return self.sp_model.encode(_UpperCamelCase , out_type=str )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE = [self.sep_token_id]
        return token_ids_a + sep + token_ids_b + sep
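# Special-token layout produced by the builder above (summary of the code, with the
# second-sequence argument written as token_ids_b):
#
#   single sequence: A [SEP]
#   sequence pair:   A [SEP] B [SEP]
#
# create_token_type_ids_from_sequences returns all zeros, as XLM-ProphetNet does
# not use token type ids.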
| 647
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=False ):
SCREAMING_SNAKE_CASE = "backbone." if is_semantic else ""
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE = "backbone." if is_semantic else ""
# queries, keys and values
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = q_bias
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
SCREAMING_SNAKE_CASE = gamma_a
SCREAMING_SNAKE_CASE = gamma_a
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
    SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=True ).raw )
return im
@torch.no_grad()
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=False ):
SCREAMING_SNAKE_CASE = False if "rvlcdip" in checkpoint_url else True
SCREAMING_SNAKE_CASE = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase__ , use_mask_token=UpperCAmelCase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
# labels
if "rvlcdip" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "rvlcdip-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , has_lm_head=UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , has_lm_head=UpperCAmelCase__ )
# load HuggingFace model
SCREAMING_SNAKE_CASE = BeitForMaskedImageModeling(UpperCAmelCase__ ) if has_lm_head else BeitForImageClassification(UpperCAmelCase__ )
model.eval()
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image
SCREAMING_SNAKE_CASE = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.logits
# verify logits
SCREAMING_SNAKE_CASE = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(UpperCAmelCase__ ), "Shape of logits not as expected"
    Path(UpperCAmelCase__ ).mkdir(exist_ok=True )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
if has_lm_head:
SCREAMING_SNAKE_CASE = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
SCREAMING_SNAKE_CASE = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__ , UpperCAmelCase__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCAmelCase__ , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase__ , UpperCAmelCase__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCAmelCase__ , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 700
|
import numpy as np
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float = 1e-12 , UpperCAmelCase__ : int = 1_0_0 , ):
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCAmelCase__ ) == np.iscomplexobj(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.iscomplexobj(UpperCAmelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCAmelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
        # Multiply the matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(UpperCAmelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
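# Hedged numeric check (upstream this function is `power_iteration`, as the test
# below also calls it):
#
#   value, vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]),
#                                   np.array([1.0, 0.0]))
#   # dominant eigenvalue of [[2, 1], [1, 2]] is 3, eigenvector ~ [1, 1] / sqrt(2)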
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] )
SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(UpperCAmelCase__ , UpperCAmelCase__ )
# Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (used for symmetric or Hermitian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(UpperCAmelCase__ )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are unique only up to sign.
assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 647
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : str = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[Any] = ''''''
_lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
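# What the self-test above checks (added commentary): each rank contributes a tensor
# with process_index + 2 rows, so after pad_across_processes every rank should hold
# num_processes + 1 rows, zero-padded at the end -- or at the front when
# pad_first=True.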
| 701
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : str , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Any=48_000 , _UpperCamelCase : Optional[Any]=480 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Any=1_024 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 14_000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = top_db
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = fft_window_size
SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = max_length_s
SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = frequency_min
SCREAMING_SNAKE_CASE = frequency_max
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale="htk" , )
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case( self : str ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = spectrogram(
_UpperCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
_UpperCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_UpperCamelCase )
SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
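    # Commentary on the fusion layout (added for clarity): the returned stack has
    # four channels -- a bilinearly "shrunk" view of the full mel spectrogram
    # followed by three randomly positioned chunks from the front, middle and back
    # thirds -- which is the 4-channel input CLAP's feature-fusion branch expects.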
def __snake_case( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - max_length
SCREAMING_SNAKE_CASE = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE = False
else:
SCREAMING_SNAKE_CASE = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE = False
        # Only "repeat" is a new possible padding value; the audio is repeated before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Tuple , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE = [
self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCamelCase )
is_longer.append(_UpperCamelCase )
if truncation == "fusion" and sum(_UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE = np.random.randint(0 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE = BatchFeature(_UpperCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(_UpperCamelCase )
return input_features
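# Usage sketch for the extractor above (hedged: the class and checkpoint names
# are assumptions; in transformers this logic lives in ClapFeatureExtractor):
#   feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = feature_extractor(raw_audio, sampling_rate=48_000, return_tensors="pt")
#   inputs["input_features"].shape  # (batch, 4, frames, 64) when truncation="fusion"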
def __lowerCamelCase (UpperCAmelCase__ : int = 1_0_0_0_0_0_0 ):
SCREAMING_SNAKE_CASE = set(range(3 , UpperCAmelCase__ , 2 ) )
primes.add(2 )
for p in range(3 , UpperCAmelCase__ , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , UpperCAmelCase__ , p ) ) )
    SCREAMING_SNAKE_CASE = [float(n ) for n in range(UpperCAmelCase__ + 1 )]
    for p in primes:
        for n in range(p , UpperCAmelCase__ + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir="." , finetune_bert=UpperCAmelCase__ , large=UpperCAmelCase__ , share_emb=UpperCAmelCase__ , use_bert_emb=UpperCAmelCase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , lambda UpperCAmelCase__ , UpperCAmelCase__ : storage )
SCREAMING_SNAKE_CASE = AbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) , UpperCAmelCase__ )
original.eval()
SCREAMING_SNAKE_CASE = BertAbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE = encoder_input_ids
SCREAMING_SNAKE_CASE = decoder_input_ids
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE = original(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = original.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = new_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = new_model.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
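# Example invocation (the script filename and both paths are placeholders):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm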
def __lowerCamelCase (UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
while cur > 1:
# Find the maximum number in arr
SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(UpperCAmelCase__ )]
# Reverse whole list
SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(UpperCAmelCase__ )]
cur -= 1
return arr
if __name__ == "__main__":
_lowerCamelCase : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCamelCase : Tuple = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
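# Trace of the loop above on [3, 1, 2]:
#   cur=3: max of [3, 1, 2] sits at index 0, so the prefix flip is a no-op and
#          reversing the first three elements gives [2, 1, 3]
#   cur=2: max of [2, 1] sits at index 0; the two flips yield [1, 2, 3]
#   cur=1: the loop ends and [1, 2, 3] is returned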
def __lowerCamelCase (UpperCAmelCase__ : int ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), F"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE = F"The input value of [n={number}] has to be > 0"
raise ValueError(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = sylvester(number - 1 )
SCREAMING_SNAKE_CASE = num - 1
SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowercase ( a , unittest.TestCase ):
lowercase__ : List[str] = RoFormerTokenizer
lowercase__ : Any = RoFormerTokenizerFast
lowercase__ : str = True
lowercase__ : List[str] = True
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
def __snake_case( self : List[Any] , **_UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **_UpperCamelCase )
def __snake_case( self : Union[str, Any] , **_UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **_UpperCamelCase )
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "永和服装饰品有限公司,今天天气非常好"
SCREAMING_SNAKE_CASE = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def __snake_case( self : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , output_text.split() )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def __snake_case( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , output_text.split() )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
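# To run only this test file locally (the path is illustrative of the
# transformers repo layout):
#   python -m pytest tests/models/roformer/test_tokenization_roformer.py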
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : str = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[Any] = ''''''
_lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
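# Shape illustration for pad_across_processes above, with 2 processes: rank 0
# holds a (2, 10) tensor and rank 1 a (3, 10) tensor, so both are padded along
# dim 0 to (3, 10) = (num_processes + 1, 10); with pad_first=True the zero rows
# are placed in front instead of at the end.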
import os
import jsonlines
import numpy as np
from tqdm import tqdm
_lowerCamelCase : List[Any] = 20_48
_lowerCamelCase : List[str] = 40_96
_lowerCamelCase : Optional[Any] = 42
_lowerCamelCase : Optional[Any] = os.environ.pop('''PROCESS_TRAIN''', '''false''')
_lowerCamelCase : Any = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] ):
def choose_first(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str=False ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 1:
SCREAMING_SNAKE_CASE = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
SCREAMING_SNAKE_CASE = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
SCREAMING_SNAKE_CASE = {"id": example["id"]}
SCREAMING_SNAKE_CASE = example["annotations"]
SCREAMING_SNAKE_CASE = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
SCREAMING_SNAKE_CASE = ["yes"] if 1 in yes_no_answer else ["no"]
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ["<cls>"]
else:
SCREAMING_SNAKE_CASE = ["short"]
SCREAMING_SNAKE_CASE = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
SCREAMING_SNAKE_CASE = ["long"]
SCREAMING_SNAKE_CASE = choose_first(annotation["long_answer"] , is_long_answer=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = []
answer.update(UpperCAmelCase__ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
SCREAMING_SNAKE_CASE = True
else:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , UpperCAmelCase__ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False ):
SCREAMING_SNAKE_CASE = _get_single_answer(UpperCAmelCase__ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE = example["document"]["tokens"]
SCREAMING_SNAKE_CASE = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(UpperCAmelCase__ ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # later, helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
SCREAMING_SNAKE_CASE = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
SCREAMING_SNAKE_CASE = example["document"]["tokens"]
SCREAMING_SNAKE_CASE = answer["start_token"]
SCREAMING_SNAKE_CASE = answer["end_token"]
SCREAMING_SNAKE_CASE = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
SCREAMING_SNAKE_CASE = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
SCREAMING_SNAKE_CASE = doc["is_html"][answer["start_token"] : answer["end_token"]]
SCREAMING_SNAKE_CASE = doc["token"][answer["start_token"] : answer["end_token"]]
SCREAMING_SNAKE_CASE = " ".join([old[i] for i in range(len(UpperCAmelCase__ ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , UpperCAmelCase__ , end="\n" )
print("Old:" , UpperCAmelCase__ , end="\n\n" )
return {
"context": " ".join(UpperCAmelCase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : List[Any]=4_0_9_6 , UpperCAmelCase__ : Tuple=True ):
# overlap will be of doc_stride - q_len
SCREAMING_SNAKE_CASE = get_context_and_ans(UpperCAmelCase__ , assertion=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
SCREAMING_SNAKE_CASE = tokenizer(example["question"]["text"] , out["context"] ).input_ids
SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = input_ids[:q_len]
SCREAMING_SNAKE_CASE = range(UpperCAmelCase__ , len(UpperCAmelCase__ ) , max_length - doc_stride )
for i in doc_start_indices:
SCREAMING_SNAKE_CASE = i + max_length - q_len
SCREAMING_SNAKE_CASE = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(UpperCAmelCase__ ),
"end_token": [-1_0_0] * len(UpperCAmelCase__ ),
"category": category,
},
}
SCREAMING_SNAKE_CASE = out["context"].split()
SCREAMING_SNAKE_CASE = splitted_context[answer["end_token"]]
SCREAMING_SNAKE_CASE = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=UpperCAmelCase__ , ).input_ids )
SCREAMING_SNAKE_CASE = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=UpperCAmelCase__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
SCREAMING_SNAKE_CASE = len(tokenizer(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
SCREAMING_SNAKE_CASE = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
SCREAMING_SNAKE_CASE = answer["start_token"]
SCREAMING_SNAKE_CASE = answer["end_token"]
if assertion:
SCREAMING_SNAKE_CASE = tokenizer.decode(UpperCAmelCase__ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , UpperCAmelCase__ , end="\n\n" )
if len(UpperCAmelCase__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
SCREAMING_SNAKE_CASE = input_ids[:q_len]
SCREAMING_SNAKE_CASE = range(UpperCAmelCase__ , len(UpperCAmelCase__ ) , max_length - doc_stride )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [] # null, yes, no, long, short
for i in doc_start_indices:
SCREAMING_SNAKE_CASE = i + max_length - q_len
SCREAMING_SNAKE_CASE = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
SCREAMING_SNAKE_CASE = start_token - i + q_len
SCREAMING_SNAKE_CASE = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
SCREAMING_SNAKE_CASE = -1_0_0
SCREAMING_SNAKE_CASE = -1_0_0
answers_category.append("null" )
SCREAMING_SNAKE_CASE = inputs[-1][start_token : end_token + 1]
answers_start_token.append(UpperCAmelCase__ )
answers_end_token.append(UpperCAmelCase__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(UpperCAmelCase__ ) )
print("Old:" , tokenizer.decode(UpperCAmelCase__ ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int]=2_0_4_8 , UpperCAmelCase__ : Dict=4_0_9_6 , UpperCAmelCase__ : Optional[int]=False ):
SCREAMING_SNAKE_CASE = get_strided_contexts_and_ans(
UpperCAmelCase__ , UpperCAmelCase__ , doc_stride=UpperCAmelCase__ , max_length=UpperCAmelCase__ , assertion=UpperCAmelCase__ , )
return example
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : str ):
with jsonlines.open(UpperCAmelCase__ , "a" ) as writer:
for example in tqdm(UpperCAmelCase__ , total=len(UpperCAmelCase__ ) , desc="Saving samples ... " ):
SCREAMING_SNAKE_CASE = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
if start == -1 and end == -1:
                    continue # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue # drop roughly 60 % of the null samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_lowerCamelCase : str = load_dataset('''natural_questions''')
_lowerCamelCase : Optional[int] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_lowerCamelCase : Optional[int] = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_lowerCamelCase : Tuple = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_lowerCamelCase : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_lowerCamelCase : Any = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_lowerCamelCase : Union[str, Any] = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
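# Sketch of one JSON line written by save_to_disk (token values illustrative):
#   {"input_ids": [...], "start_token": 27, "end_token": 31, "category": 1}
# where category is the integer from CATEGORY_MAPPING
# (null=0, short=1, long=2, yes=3, no=4).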
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Tuple = (KDPMaDiscreteScheduler,)
lowercase__ : Optional[int] = 10
def __snake_case( self : Optional[Any] , **_UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if str(_UpperCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
import argparse
import os
import re
import packaging.version
_lowerCamelCase : str = '''examples/'''
_lowerCamelCase : List[Any] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_lowerCamelCase : Any = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
_lowerCamelCase : Dict = '''README.md'''
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ):
with open(UpperCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE = f.read()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE = replace.replace("VERSION" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = re_pattern.sub(UpperCAmelCase__ , UpperCAmelCase__ )
with open(UpperCAmelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : List[Any] ):
for folder, directories, fnames in os.walk(UpperCAmelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , pattern="examples" )
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not patch:
update_version_in_examples(UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE = "1. Want to contribute a new model?"
with open(UpperCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(UpperCAmelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCAmelCase__ )
def __lowerCamelCase ():
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE = f.read()
SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["init"][0].search(UpperCAmelCase__ ).groups()[0]
return packaging.version.parse(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : str=False ):
SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
SCREAMING_SNAKE_CASE = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE = input(F"Which version are you releasing? [{default_version}]" )
if len(UpperCAmelCase__ ) == 0:
SCREAMING_SNAKE_CASE = default_version
print(F"Updating version to {version}." )
global_version_update(UpperCAmelCase__ , patch=UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = get_version()
SCREAMING_SNAKE_CASE = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE = input(F"Which version are we developing now? [{dev_version}]" )
if len(UpperCAmelCase__ ) == 0:
SCREAMING_SNAKE_CASE = dev_version
print(F"Updating version to {version}." )
global_version_update(UpperCAmelCase__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_lowerCamelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
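# Example invocations (the script path is illustrative):
#   python utils/release.py                  # release: bump to the next minor version
#   python utils/release.py --patch          # patch release: bump the micro version
#   python utils/release.py --post_release   # afterwards: move to the next .dev0 version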
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
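# Minimal usage sketch (in the upstream library this class is ErnieMConfig and
# the model below is ErnieMModel; both names are assumptions here):
#   config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
#   model = ErnieMModel(config)  # randomly initialised model of that size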
import numpy as np
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float = 1e-12 , UpperCAmelCase__ : int = 1_0_0 , ):
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCAmelCase__ ) == np.iscomplexobj(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.iscomplexobj(UpperCAmelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCAmelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
# Multiple matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(UpperCAmelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] )
SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(UpperCAmelCase__ , UpperCAmelCase__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(UpperCAmelCase__ )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
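# Quick sanity check (a minimal sketch): the dominant eigenpair of diag(2, 1) is
# eigenvalue 2 with eigenvector e1, so power iteration should return roughly 2.0
# and a vector close to [1, 0]:
#   value, vec = power_iteration(np.diag([2.0, 1.0]), np.array([1.0, 1.0]))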
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
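# The lazy-module pattern above defers the heavy framework imports until first
# attribute access, for example:
#   import transformers.models.blenderbot as blenderbot  # cheap, no torch/tf yet
#   blenderbot.BlenderbotModel                            # triggers the real import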
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
lowercase__ : Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
lowercase__ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""} )
lowercase__ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
lowercase__ : Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
lowercase__ : Optional[int] = field(
default=10_000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    lowercase__ : Optional[float] = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
    lowercase__ : Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate schedule type."""} )
lowercase__ : Optional[int] = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
lowercase__ : Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
lowercase__ : Optional[bool] = field(
default=a , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
lowercase__ : Optional[int] = field(default=50_000 , metadata={"""help""": """Maximum number of training steps."""} )
lowercase__ : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
lowercase__ : Optional[int] = field(default=1_024 , metadata={"""help""": """Sequence lengths used for training."""} )
lowercase__ : Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""} )
lowercase__ : Optional[int] = field(
default=1_024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
lowercase__ : Optional[bool] = field(default=a , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
lowercase__ : Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
lowercase__ : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
lowercase__ : Optional[int] = field(default=1_024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
lowercase__ : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
lowercase__ : Optional[int] = field(default=a , metadata={"""help""": """Number of workers used for code evaluation."""} )
lowercase__ : Optional[int] = field(
default=a , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
lowercase__ : Optional[bool] = field(
default=a , metadata={"""help""": """Sample from the language model's output distribution."""} )
lowercase__ : Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
lowercase__ : Optional[int] = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
lowercase__ : Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
lowercase__ : Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
lowercase__ : Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
lowercase__ : Optional[int] = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
lowercase__ : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
lowercase__ : Optional[str] = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
lowercase__ : Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
lowercase__ : Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
lowercase__ : Optional[int] = field(
default=a , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
lowercase__ : Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
lowercase__ : Optional[int] = field(
        default=100_000 , metadata={"""help""": """Number of samples to save per JSON output file."""} )
lowercase__ : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
lowercase__ : Optional[float] = field(
default=1_000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
lowercase__ : Optional[float] = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
lowercase__ : Optional[float] = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
lowercase__ : Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
lowercase__ : Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
lowercase__ : Optional[bool] = field(
default=a , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
lowercase__ : Optional[float] = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
lowercase__ : Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
lowercase__ : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
lowercase__ : Optional[int] = field(default=200_000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
lowercase__ : Optional[int] = field(
        default=32_768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
lowercase__ : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
lowercase__ : Optional[bool] = field(default=a , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
lowercase__ : Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
    lowercase__ : Optional[int] = field(default=a , metadata={"""help""": """Number of workers used for pretokenization."""} )
@dataclass
class lowercase :
lowercase__ : Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
lowercase__ : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
lowercase__ : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
lowercase__ : Optional[bool] = field(default=a , metadata={"""help""": """Push saved tokenizer to the hub."""} )
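# Usage sketch (assumes transformers' HfArgumentParser; `TrainingArguments` is a
# stand-in for one of the dataclasses defined above):
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]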
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class lowercase ( Generic[T] ):
def __init__( self : Any , _UpperCamelCase : T ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return F"{self.data}"
class lowercase ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
def __iter__( self : str ) -> Iterator[T]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.top
while node:
yield node.data
SCREAMING_SNAKE_CASE = node.next
def __str__( self : int ) -> str:
'''simple docstring'''
return "->".join([str(_UpperCamelCase ) for item in self] )
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __snake_case( self : Union[str, Any] ) -> bool:
'''simple docstring'''
return self.top is None
def __snake_case( self : str , _UpperCamelCase : T ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Node(_UpperCamelCase )
if not self.is_empty():
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = node
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = self.top.next
return pop_node.data
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __snake_case( self : Dict ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if __name__ == "__main__":
from doctest import testmod
testmod()
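# Usage sketch (`Stack` stands in for the obfuscated class name above, and
# push/pop/peek for its obfuscated method names):
#   stack = Stack[int]()
#   stack.push(1)
#   stack.push(2)
#   assert stack.pop() == 2 and stack.peek() == 1 and len(stack) == 1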
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowercase ( a ):
lowercase__ : Tuple = """biogpt"""
def __init__( self : Dict , _UpperCamelCase : Union[str, Any]=42_384 , _UpperCamelCase : List[str]=1_024 , _UpperCamelCase : int=24 , _UpperCamelCase : Dict=16 , _UpperCamelCase : Optional[int]=4_096 , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Optional[int]=1_024 , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Optional[int]=1e-12 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : Optional[Any]=1 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Optional[Any]=2 , **_UpperCamelCase : Tuple , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = scale_embedding
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
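# Usage sketch for the config above (standard transformers API; a model built
# from a fresh config gets randomly initialised weights, and the small layer
# count here is only to keep the sketch cheap to construct):
from transformers import BioGptConfig, BioGptModel
_config = BioGptConfig(vocab_size=42_384, hidden_size=1_024, num_hidden_layers=2)
_model = BioGptModel(_config)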
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # silence TensorFlow's C++ logging before any TF import
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
from __future__ import annotations
class lowercase :
def __init__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text, pattern
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = len(_UpperCamelCase ), len(_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : str ) -> int:
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int ) -> int:
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __snake_case( self : Optional[int] ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE = self.mismatch_in_text(_UpperCamelCase )
if mismatch_index == -1:
positions.append(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_lowerCamelCase : Optional[int] = '''ABAABA'''
_lowerCamelCase : Tuple = '''AB'''
_lowerCamelCase : str = BoyerMooreSearch(text, pattern)
_lowerCamelCase : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
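# De-obfuscated reference for the search above (the helper name
# `_bad_character_search` is illustrative). Like the class above, it checks
# every alignment and only uses the bad-character rule to find the mismatch;
# a full Boyer-Moore would also skip ahead by the computed shift.
def _bad_character_search(text: str, pattern: str) -> list[int]:
    positions = []
    for start in range(len(text) - len(pattern) + 1):
        # scan right-to-left, as the bad-character heuristic prescribes
        mismatch = next(
            (start + i for i in range(len(pattern) - 1, -1, -1) if pattern[i] != text[start + i]),
            -1,
        )
        if mismatch == -1:
            positions.append(start)
    return positions
assert _bad_character_search("ABAABA", "AB") == [0, 3]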
def __lowerCamelCase (UpperCAmelCase__ : list[int] ):
if not numbers:
return 0
if not isinstance(UpperCAmelCase__ , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = numbers[0]
for i in range(1 , len(UpperCAmelCase__ ) ):
# update the maximum and minimum subarray products
SCREAMING_SNAKE_CASE = numbers[i]
if number < 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = min_till_now, max_till_now
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , max_till_now * number )
SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ , min_till_now * number )
# update the maximum product found till now
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , UpperCAmelCase__ )
return max_prod
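# Worked example of the bookkeeping above on [2, 3, -2, 4]: the running
# (max, min) products evolve (2, 2) -> (6, 3) -> (-2, -12) -> (4, -48), so the
# answer is 6. De-obfuscated sketch (the name `_max_product_sketch` is
# illustrative):
def _max_product_sketch(numbers: list[int]) -> int:
    max_now = min_now = best = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            # a negative factor turns the smallest product into the largest
            max_now, min_now = min_now, max_now
        max_now = max(number, max_now * number)
        min_now = min(number, min_now * number)
        best = max(best, max_now)
    return best
assert _max_product_sketch([2, 3, -2, 4]) == 6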
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_lowerCamelCase : str = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
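# Effect of the _LazyModule wiring above (illustrative): importing the package
# stays cheap, and the first attribute access pulls in the heavy submodule.
# From user code (not inside this __init__ itself, which would be circular):
#     import transformers
#     transformers.SpeechT5Config  # first access triggers the real import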
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowerCamelCase : str = threading.Lock()
_lowerCamelCase : Optional[logging.Handler] = None
_lowerCamelCase : Any = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_lowerCamelCase : Union[str, Any] = logging.WARNING
_lowerCamelCase : List[Any] = True
def __lowerCamelCase ():
    SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_VERBOSITY" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def __lowerCamelCase ():
return __name__.split("." )[0]
def __lowerCamelCase ():
return logging.getLogger(_get_library_name() )
def __lowerCamelCase ():
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ():
return log_levels
def __lowerCamelCase (UpperCAmelCase__ : Optional[str] = None ):
if name is None:
SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __lowerCamelCase (UpperCAmelCase__ : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCAmelCase__ )
def __lowerCamelCase ():
    return set_verbosity(INFO )
def __lowerCamelCase ():
    return set_verbosity(WARNING )
def __lowerCamelCase ():
    return set_verbosity(DEBUG )
def __lowerCamelCase ():
    return set_verbosity(ERROR )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase (self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ):
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , UpperCAmelCase__ )
if no_advisory_warnings:
return
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : str = warning_advice
@functools.lru_cache(None )
def __lowerCamelCase (self : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ):
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : Dict = warning_once
class lowercase :
def __init__( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : str ) -> List[Any]: # pylint: disable=unused-argument
'''simple docstring'''
SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self : Optional[Any] ) -> str:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[str] , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
def empty_fn(*_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
return self
def __exit__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return
class lowercase :
def __call__( self : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_UpperCamelCase , **_UpperCamelCase )
else:
return EmptyTqdm(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Dict , *_UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCamelCase : Union[str, Any] = _tqdm_cls()
def __lowerCamelCase ():
global _tqdm_active
return bool(_tqdm_active )
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
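# Usage sketch (this module is what transformers exposes as
# transformers.utils.logging; every get_logger() child hangs off the one
# library root logger, so a single verbosity call affects all of them):
#     from transformers.utils import logging as hf_logging
#     hf_logging.set_verbosity_info()
#     logger = hf_logging.get_logger("transformers.example")  # name is illustrative
#     logger.info("visible at INFO verbosity")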
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
_lowerCamelCase : Optional[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ):
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = collections.OrderedDict()
SCREAMING_SNAKE_CASE = collections.OrderedDict()
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
SCREAMING_SNAKE_CASE = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = b
SCREAMING_SNAKE_CASE = idx
for wd in b:
SCREAMING_SNAKE_CASE = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase ( a ):
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict="<|endoftext|>" , _UpperCamelCase : Tuple="<|endoftext|>" , _UpperCamelCase : int="<|startoftext|>" , _UpperCamelCase : int="<|endoftext|>" , _UpperCamelCase : Optional[Any]=False , **_UpperCamelCase : Tuple , ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , do_clean_text=_UpperCamelCase , **_UpperCamelCase , )
if not os.path.isfile(_UpperCamelCase ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(_UpperCamelCase ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
SCREAMING_SNAKE_CASE = do_clean_text
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_vocab_and_emoji(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __snake_case( self : int ) -> int:
'''simple docstring'''
return len(self.raw_vocab )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __snake_case( self : Dict , _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(_UpperCamelCase , clean=self.do_clean_text )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token ) )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).strip()
return out_string
def __snake_case( self : Tuple , _UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
def __snake_case( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
if os.path.isdir(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
SCREAMING_SNAKE_CASE = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
SCREAMING_SNAKE_CASE = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(",".join(_UpperCamelCase ) + "\n" )
index += 1
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , _UpperCamelCase )
return vocab_file, emoji_file
class lowercase ( a ):
def __init__( self : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = vocab # same as swe
SCREAMING_SNAKE_CASE = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE = emoji
SCREAMING_SNAKE_CASE = np.max([len(_UpperCamelCase ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
SCREAMING_SNAKE_CASE = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
SCREAMING_SNAKE_CASE = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
SCREAMING_SNAKE_CASE = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
SCREAMING_SNAKE_CASE = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
SCREAMING_SNAKE_CASE = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
SCREAMING_SNAKE_CASE = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
SCREAMING_SNAKE_CASE = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
SCREAMING_SNAKE_CASE = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Tuple ) -> List[Any]:
'''simple docstring'''
return len(self.ids_to_tokens )
def __snake_case( self : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<URL>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<EMAIL>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<TEL>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<DATE>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<DATE>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.content_repattera.sub("<PRICE>" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def __snake_case( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = text.replace(" " , "<SP>" )
SCREAMING_SNAKE_CASE = text.replace(" " , "<SP>" )
SCREAMING_SNAKE_CASE = text.replace("\r\n" , "<BR>" )
SCREAMING_SNAKE_CASE = text.replace("\n" , "<BR>" )
SCREAMING_SNAKE_CASE = text.replace("\r" , "<BR>" )
SCREAMING_SNAKE_CASE = text.replace("\t" , "<TAB>" )
SCREAMING_SNAKE_CASE = text.replace("—" , "ー" )
SCREAMING_SNAKE_CASE = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE = text.replace(_UpperCamelCase , _UpperCamelCase )
if clean:
SCREAMING_SNAKE_CASE = self.clean_text(_UpperCamelCase )
def check_simbol(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = x.encode()
            if len(x ) == 1 and len(e ) == 2:
SCREAMING_SNAKE_CASE = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC_2_A_1 and c <= 0xC_2_B_F)
or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3)
or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F)
or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2)
):
return True
return False
def checkuae(_UpperCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = x.encode()
            if len(x ) == 1 and len(e ) == 3:
SCREAMING_SNAKE_CASE = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F:
return True
return False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
while pos < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = min(len(_UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
SCREAMING_SNAKE_CASE = [] # (token_id, token, pos)
for e in range(_UpperCamelCase , _UpperCamelCase , -1 ):
SCREAMING_SNAKE_CASE = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_UpperCamelCase ) > 2:
SCREAMING_SNAKE_CASE = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_UpperCamelCase ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[0] )[0]
result.append(_UpperCamelCase )
SCREAMING_SNAKE_CASE = e
else:
SCREAMING_SNAKE_CASE = pos + 1
SCREAMING_SNAKE_CASE = text[pos:end]
if check_simbol(_UpperCamelCase ):
result.append("<KIGOU>" )
elif checkuae(_UpperCamelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
SCREAMING_SNAKE_CASE = end
return result
def __snake_case( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any="\n" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_UpperCamelCase ) > 0:
words.append(bytearray(_UpperCamelCase ).decode("utf-8" , errors="replace" ) )
SCREAMING_SNAKE_CASE = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(_UpperCamelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
words.append(bytearray(_UpperCamelCase ).decode("utf-8" , errors="replace" ) )
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase )
return text
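# Usage sketch ("abeja/gpt-neox-japanese-2.7b" is the checkpoint named in the
# pretrained maps above; characters without a subword fall back to <|byteN|>
# tokens, which decode() stitches back into UTF-8):
from transformers import GPTNeoXJapaneseTokenizer
_tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
_ids = _tok("こんにちは、世界")["input_ids"]
print(_tok.decode(_ids))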
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(a ) , """Tatoeba directory does not exist.""" )
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCamelCase )
@slow
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.resolver.write_model_card("opus-mt-he-en" , dry_run=_UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase ( a ):
lowercase__ : Optional[Any] = """dandelin/vilt-b32-finetuned-vqa"""
lowercase__ : List[Any] = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
lowercase__ : Optional[int] = """image_qa"""
lowercase__ : Dict = AutoProcessor
lowercase__ : str = AutoModelForVisualQuestionAnswering
lowercase__ : List[str] = ["""image""", """text"""]
lowercase__ : List[Any] = ["""text"""]
def __init__( self : Tuple , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : "Image" , _UpperCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor(_UpperCamelCase , _UpperCamelCase , return_tensors="pt" )
def __snake_case( self : Any , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
return self.model(**_UpperCamelCase ).logits
def __snake_case( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
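# Call-path sketch for the tool above: PipelineTool.__call__ chains
# encode -> forward -> decode, i.e. processor(image, question) -> VQA logits
# -> argmax -> id2label lookup. Illustrative invocation (instance name and
# image path are assumed):
#     answer = image_qa_tool(image=Image.open("photo.jpg"), question="What is shown?")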
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/mt5-small" )
SCREAMING_SNAKE_CASE = tokenizer("Hello there" , return_tensors="np" ).input_ids
SCREAMING_SNAKE_CASE = tokenizer("Hi I am" , return_tensors="np" ).input_ids
SCREAMING_SNAKE_CASE = shift_tokens_right(_UpperCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , decoder_input_ids=_UpperCamelCase ).logits
SCREAMING_SNAKE_CASE = optax.softmax_cross_entropy(_UpperCamelCase , onehot(_UpperCamelCase , logits.shape[-1] ) ).mean()
SCREAMING_SNAKE_CASE = -(labels.shape[-1] * loss.item())
SCREAMING_SNAKE_CASE = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
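# Note on the assertion above: optax.softmax_cross_entropy yields the mean
# per-token loss, so -(labels.shape[-1] * loss.item()) recovers the summed
# sequence log-likelihood. EXPECTED_SCORE is presumably that quantity from the
# reference implementation (an assumption; its provenance is not stated here).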
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] = None ):
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else ""
# apply OCR
SCREAMING_SNAKE_CASE = to_pil_image(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pil_image.size
SCREAMING_SNAKE_CASE = pytesseract.image_to_data(UpperCAmelCase__ , lang=UpperCAmelCase__ , output_type="dict" , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(UpperCAmelCase__ ) if not word.strip()]
SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase__ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowercase ( a ):
lowercase__ : Optional[int] = ["""pixel_values"""]
def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "" , **_UpperCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config
def __snake_case( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Any , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : str , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE = [flip_channel_order(_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE = words_batch
SCREAMING_SNAKE_CASE = boxes_batch
return data
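# Usage sketch (this preprocessing matches transformers' LayoutLMv2ImageProcessor;
# apply_ocr=True additionally requires pytesseract):
from PIL import Image as _PILImage
from transformers import LayoutLMv2ImageProcessor
_processor = LayoutLMv2ImageProcessor(apply_ocr=False)  # skip OCR so tesseract is not needed
_batch = _processor(_PILImage.new("RGB", (640, 480)), return_tensors="pt")
print(_batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) after the default resize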
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Dict = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : Dict = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class lowercase ( a ):
lowercase__ : Dict = VOCAB_FILES_NAMES
lowercase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
lowercase__ : Tuple = GPTaTokenizer
def __init__( self : str , _UpperCamelCase : int=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Dict="<|endoftext|>" , _UpperCamelCase : Union[str, Any]="<|endoftext|>" , _UpperCamelCase : Any="<|endoftext|>" , _UpperCamelCase : List[Any]=False , **_UpperCamelCase : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = kwargs.pop("add_bos_token" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = add_prefix_space
def __snake_case( self : Dict , *_UpperCamelCase : List[Any] , **_UpperCamelCase : int ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[Any] ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
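# Usage sketch ("gpt2" is one of the checkpoints in the pretrained maps above):
from transformers import GPT2TokenizerFast
_tok = GPT2TokenizerFast.from_pretrained("gpt2")
print(_tok("Hello world")["input_ids"])  # [15496, 995] under the standard GPT-2 BPE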
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
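# A minimal usage sketch of the processor exercised above, assuming the same local
# COCO fixtures (paths come from the tests and may not exist elsewhere):
#
#     processor = DetaImageProcessor()
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     encoding = processor(images=image, return_tensors="pt")
#     encoding["pixel_values"].shape  # torch.Size([1, 3, 800, 1066]) for this image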
| 647
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Tuple = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
SCREAMING_SNAKE_CASE = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = in_proj_weight[
: encoder_config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-encoder_config.hidden_size :, :
]
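# The fused attention projection stacks queries, keys and values along the first
# dimension; the slicing above is equivalent to this toy split (hidden size 4 is an
# illustrative assumption, not the real ViT dimension):
#
#     qkv = torch.arange(12 * 4).reshape(12, 4)  # shape (3 * hidden, hidden)
#     q, k, v = qkv[:4], qkv[4:8], qkv[-4:]      # each of shape (hidden, hidden)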
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase (UpperCAmelCase__ : str ):
if "handwritten" in checkpoint_url:
SCREAMING_SNAKE_CASE = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
SCREAMING_SNAKE_CASE = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
SCREAMING_SNAKE_CASE = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 1_0_2_4
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = "relu"
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
# load HuggingFace model
SCREAMING_SNAKE_CASE = ViTModel(UpperCAmelCase__ , add_pooling_layer=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = TrOCRForCausalLM(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" , check_hash=UpperCAmelCase__ )["model"]
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
if key.startswith("decoder" ) and "output_projection" not in key:
SCREAMING_SNAKE_CASE = val
else:
SCREAMING_SNAKE_CASE = val
# load state dict
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=encoder_config.image_size )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("roberta-large" )
SCREAMING_SNAKE_CASE = TrOCRProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = processor(images=prepare_img(UpperCAmelCase__ ) , return_tensors="pt" ).pixel_values
# verify logits
SCREAMING_SNAKE_CASE = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
SCREAMING_SNAKE_CASE = model(pixel_values=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCamelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
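# Example invocation (a sketch; the script file name is illustrative and the output
# folder is arbitrary):
#
#     python convert_trocr_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten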
| 716
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float , **_UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = padding_value
SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , _UpperCamelCase : Union[bool, str, PaddingStrategy] = True , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCamelCase ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(_UpperCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "pt"
elif isinstance(_UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(_UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE = to_numpy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = [to_numpy(_UpperCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=_UpperCamelCase , max_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if not all(len(_UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE = self._truncate(
_UpperCamelCase , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , truncation=_UpperCamelCase , )
truncated_inputs.append(_UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = {}
for i in range(_UpperCamelCase ):
# padding
SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE = []
if value.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = value.astype(np.floataa )
batch_outputs[key].append(_UpperCamelCase )
return BatchFeature(_UpperCamelCase , tensor_type=_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
SCREAMING_SNAKE_CASE = np.ones(len(_UpperCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = max_length - len(_UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
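    # The right-padding branch above is equivalent to this small NumPy sketch
    # (padding_value=0.0 and a 1-D feature are illustrative assumptions):
    #
    #     x = np.array([1.0, 2.0, 3.0])
    #     np.pad(x, (0, 2), "constant", constant_values=0.0)  # -> [1., 2., 3., 0., 0.]
    #     np.pad(np.ones(3, dtype=np.int32), (0, 2))          # mask -> [1, 1, 1, 0, 0]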
def __snake_case( self : Dict , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> Optional[int]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def __snake_case( self : Optional[Any] , _UpperCamelCase : int=False , _UpperCamelCase : Tuple=None ) -> Tuple:
'''simple docstring'''
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = PaddingStrategy(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = padding
else:
SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
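# How the helper above resolves its argument, sketched as a mapping (the enum
# members come from PaddingStrategy; this is a summary, not an exhaustive spec):
#
#     padding=True          -> PaddingStrategy.LONGEST
#     padding="max_length"  -> PaddingStrategy.MAX_LENGTH (max_length must be set)
#     padding=False         -> PaddingStrategy.DO_NOT_PAD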
| 647
| 0
|
def __lowerCamelCase (UpperCAmelCase__ : int = 1_0 , UpperCAmelCase__ : int = 1_0_0_0 , UpperCAmelCase__ : bool = True ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
return int((number_a + number_a) / 2 )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
        raise ValueError(
            "to_guess must lie strictly between lower and higher" )
def answer(UpperCAmelCase__ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = get_avg(UpperCAmelCase__ , UpperCAmelCase__ )
last_numbers.append(UpperCAmelCase__ )
if answer(UpperCAmelCase__ ) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(UpperCAmelCase__ ) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = int(input("Enter lower value : " ).strip() )
SCREAMING_SNAKE_CASE = int(input("Enter high value : " ).strip() )
SCREAMING_SNAKE_CASE = int(input("Enter value to guess : " ).strip() )
guess_the_number(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
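# Worked trace with illustrative inputs: guess_the_number over lower=0, higher=1000,
# to_guess=17 visits the midpoints 500, 250, 125, 62, 31, 15, 23, 19, 17 -- each
# guess halves the remaining interval, so the search takes O(log(higher - lower))
# steps.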
| 717
|
import functools
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ):
# Validation
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(UpperCAmelCase__ ) != 3 or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE = set(UpperCAmelCase__ )
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
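# Example (illustrative input; the solver above is the classic minimum-cost-tickets
# dynamic program):
#
#     days  = [1, 4, 6, 7, 8, 9, 10, 15]
#     costs = [2, 7, 15]
#     # optimal plan: 1-day pass on day 1 (2), 7-day pass covering days 4-10 (7),
#     # 1-day pass on day 15 (2) -> total cost 11, which the recursion returns.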
| 647
| 0
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase :
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : int
lowercase__ : int
lowercase__ : float
lowercase__ : float
lowercase__ : Tuple[int]
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __snake_case( self : Dict ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(_UpperCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.shape
SCREAMING_SNAKE_CASE = int(np.prod(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = self.get_image_coords()
SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE = self.get_camera_rays(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rays.view(_UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __snake_case( self : Dict , _UpperCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE = coords.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = self.resolution()
SCREAMING_SNAKE_CASE = self.fov()
SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE = fracs.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = (
self.z.view(_UpperCamelCase , 1 , 3 )
+ self.x.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(_UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_UpperCamelCase , *_UpperCamelCase , 2 , 3 )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCamelCase , height=_UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase (UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE = np.array([np.sin(UpperCAmelCase__ ), np.cos(UpperCAmelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE = -z * 4
SCREAMING_SNAKE_CASE = np.array([np.cos(UpperCAmelCase__ ), -np.sin(UpperCAmelCase__ ), 0.0] )
SCREAMING_SNAKE_CASE = np.cross(UpperCAmelCase__ , UpperCAmelCase__ )
origins.append(UpperCAmelCase__ )
xs.append(UpperCAmelCase__ )
ys.append(UpperCAmelCase__ )
zs.append(UpperCAmelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , width=UpperCAmelCase__ , height=UpperCAmelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase__ )) , )
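# A small numeric check of the NDC mapping inside get_camera_rays (illustrative):
# with res = 3 along one axis, pixel coordinates 0, 1, 2 map through
# (coord / (res - 1)) * 2 - 1 to fractions -1.0, 0.0, 1.0, which tan(fov / 2) then
# scales so the rays span the image plane symmetrically around the camera axis.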
| 718
|
from __future__ import annotations
import math
def __lowerCamelCase (UpperCAmelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_lowerCamelCase : Tuple = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def __lowerCamelCase (UpperCAmelCase__ : int ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
SCREAMING_SNAKE_CASE = []
for num in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = 0
while 2 * i * i <= odd_composites[num]:
SCREAMING_SNAKE_CASE = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def __lowerCamelCase ():
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 647
| 0
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowerCamelCase ():
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=UpperCAmelCase__ , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=UpperCAmelCase__ , default=5 )
parser.add_argument("--batch_size" , type=UpperCAmelCase__ , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=UpperCAmelCase__ , default=1 )
parser.add_argument("--freeze" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
parser.add_argument("--learning_rate" , type=UpperCAmelCase__ , default=5e-4 )
parser.add_argument("--seed" , type=UpperCAmelCase__ , default=0 )
parser.add_argument("--lr_scheduler_type" , type=UpperCAmelCase__ , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=UpperCAmelCase__ , default=1_0 )
parser.add_argument("--weight_decay" , type=UpperCAmelCase__ , default=0.01 )
parser.add_argument("--output_dir" , type=UpperCAmelCase__ , default="./results" )
return parser.parse_args()
_lowerCamelCase : str = load('''accuracy''')
def __lowerCamelCase (UpperCAmelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eval_pred
SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ , axis=1 )
return metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )
class lowercase ( a ):
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> None:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = trainer
def __snake_case( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
if control.should_evaluate:
SCREAMING_SNAKE_CASE = deepcopy(_UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def __lowerCamelCase ():
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_args()
set_seed(args.seed )
SCREAMING_SNAKE_CASE = load_dataset("codeparrot/codecomplex" , split="train" )
SCREAMING_SNAKE_CASE = dataset.train_test_split(test_size=0.2 )
SCREAMING_SNAKE_CASE = train_test["test"].train_test_split(test_size=0.5 )
SCREAMING_SNAKE_CASE = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE = tokenizer.eos_token
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
SCREAMING_SNAKE_CASE = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(UpperCAmelCase__ : List[Any] ):
SCREAMING_SNAKE_CASE = tokenizer(example["src"] , truncation=UpperCAmelCase__ , max_length=1_0_2_4 )
        SCREAMING_SNAKE_CASE = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
SCREAMING_SNAKE_CASE = train_test_validation.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=train_test_validation["train"].column_names , )
SCREAMING_SNAKE_CASE = DataCollatorWithPadding(tokenizer=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
SCREAMING_SNAKE_CASE = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
print("Training..." )
trainer.add_callback(CustomCallback(UpperCAmelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
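# Example invocation (a sketch; the script file name is illustrative, the flags come
# from the argparse definition above):
#
#     python train_complexity_predictor.py \
#         --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --batch_size 6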
| 719
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
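# A minimal usage sketch of the processor under test (pil_image is an assumed PIL
# input; the output shape mirrors the assertions above):
#
#     processor = DPTImageProcessor(size={"height": 18, "width": 18})
#     out = processor(images=pil_image, return_tensors="pt")
#     out["pixel_values"].shape  # (batch, num_channels, 18, 18)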
| 647
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowerCamelCase : Union[str, Any] = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowerCamelCase : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowerCamelCase : str = '''|'''.join(sys.argv[1:])
_lowerCamelCase : Optional[Any] = re.compile(rf"""^({joined_dirs}).*?\.py$""")
_lowerCamelCase : Tuple = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 720
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ):
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ""
else:
SCREAMING_SNAKE_CASE = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : Any ):
# projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it's not needed.
SCREAMING_SNAKE_CASE = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = ViTMSNConfig()
SCREAMING_SNAKE_CASE = 1_0_0_0
SCREAMING_SNAKE_CASE = "datasets/huggingface/label-files"
SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 3_8_4
SCREAMING_SNAKE_CASE = 1_5_3_6
SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = ViTMSNModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["target_encoder"]
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , base_model=UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , base_model=UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw )
SCREAMING_SNAKE_CASE = ViTImageProcessor(
size=config.image_size , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
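# Example invocation (a sketch; the script file name is illustrative):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small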
| 647
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowercase ( unittest.TestCase ):
def __snake_case( self : Tuple , _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
SCREAMING_SNAKE_CASE = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_UpperCamelCase )
def __snake_case( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sgugger/tiny-distilbert-classification"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , only_pretrain_model=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , torchscript=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __snake_case( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , fpaa=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
# set architectures equal to `None`
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_UpperCamelCase , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tinier_bart"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tinier_bart"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , save_to_csv=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(_UpperCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(_UpperCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(_UpperCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(_UpperCamelCase , "env.csv" ) , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "env.csv" ) ).exists() )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_UpperCamelCase : Optional[int] ):
self.assertTrue(hasattr(_UpperCamelCase , "sequential" ) )
self.assertTrue(hasattr(_UpperCamelCase , "cumulative" ) )
self.assertTrue(hasattr(_UpperCamelCase , "current" ) )
self.assertTrue(hasattr(_UpperCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCamelCase , "log.txt" ) , log_print=_UpperCamelCase , trace_memory_line_by_line=_UpperCamelCase , multi_process=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = PyTorchBenchmark(_UpperCamelCase )
SCREAMING_SNAKE_CASE = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "log.txt" ) ).exists() )
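# A standalone usage sketch mirroring the tests above (same tiny model and
# sizes that the tests use; this block is illustrative, not part of the
# original test file):
#
#     >>> args = PyTorchBenchmarkArguments(
#     ...     models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#     ...     sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     ... )
#     >>> results = PyTorchBenchmark(args).run()
#     >>> results.time_inference_result["sshleifer/tiny-gpt2"]["result"][1][8]  # seconds per forward pass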
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
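# Expected vocab file layout for the loader above (a sketch; tokens shown are
# the specials this tokenizer reserves): one token per line, with the line
# index becoming the token id, e.g.
#
#     [PAD]    # id 0
#     [CLS]    # id 1
#     [SEP]    # id 2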
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
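# Resulting sequence format (a note on the method above, not in the original
# file): a single sequence becomes `tokens [SEP]`, and a pair becomes
# `tokens_a [SEP] tokens_b [SEP]`.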
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , a , )
class lowercase ( a ):
lowercase__ : Optional[int] = RobertaConfig
lowercase__ : Optional[Any] = """roberta"""
def __init__( self : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
super().__init__(_UpperCamelCase )
SCREAMING_SNAKE_CASE = RobertaEmbeddings(_UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , a , )
class lowercase ( a ):
lowercase__ : Optional[Any] = RobertaConfig
lowercase__ : List[str] = """roberta"""
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_UpperCamelCase )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = config.num_hidden_layers
SCREAMING_SNAKE_CASE = DeeRobertaModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_UpperCamelCase )
def __snake_case( self : List[str] , _UpperCamelCase : str=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Tuple=-1 , _UpperCamelCase : int=False , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_layers
try:
SCREAMING_SNAKE_CASE = self.roberta(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , position_ids=_UpperCamelCase , head_mask=_UpperCamelCase , inputs_embeds=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = outputs[1]
SCREAMING_SNAKE_CASE = self.dropout(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.classifier(_UpperCamelCase )
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE = e.message
SCREAMING_SNAKE_CASE = e.exit_layer
SCREAMING_SNAKE_CASE = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE = entropy(_UpperCamelCase )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE = MSELoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE = MSELoss()
SCREAMING_SNAKE_CASE = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCamelCase )
if train_highway:
SCREAMING_SNAKE_CASE = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure the input matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
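# A minimal usage sketch (illustrative values; the dominant eigenvalue of
# this symmetric matrix is 3):
#
#     >>> a = np.array([[2.0, 1.0], [1.0, 2.0]])
#     >>> value, vec = power_iteration(a, np.array([1.0, 0.0]))
#     >>> round(float(value), 5)
#     3.0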
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : str , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Any=48_000 , _UpperCamelCase : Optional[Any]=480 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Any=1_024 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 14_000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = top_db
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = fft_window_size
SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = max_length_s
SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = frequency_min
SCREAMING_SNAKE_CASE = frequency_max
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale="htk" , )
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case( self : str ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = spectrogram(
_UpperCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
_UpperCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_UpperCamelCase )
SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - max_length
SCREAMING_SNAKE_CASE = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE = False
else:
SCREAMING_SNAKE_CASE = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Tuple , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE = [
self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCamelCase )
is_longer.append(_UpperCamelCase )
if truncation == "fusion" and sum(_UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE = np.random.randint(0 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE = BatchFeature(_UpperCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(_UpperCamelCase )
return input_features
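# A minimal usage sketch for the extractor class defined above (all concrete
# values below are assumptions for illustration, not from the original file):
#
#     >>> import numpy as np
#     >>> extractor = lowercase()  # the CLAP feature extractor class above
#     >>> audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#     >>> features = extractor(audio, sampling_rate=48_000, return_tensors="np")
#     >>> sorted(features.keys())
#     ['input_features', 'is_longer']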
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend the "excluding" sum with this number, or carry the
        # best sum seen so far.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
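# A short worked example (values chosen for illustration): for
# [2, 7, 9, 3, 1] the best non-adjacent picks are 2 + 9 + 1 = 12.
#
#     >>> maximum_non_adjacent_sum([2, 7, 9, 3, 1])
#     12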
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path, pytorch_dump_folder_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Sort items by value-to-weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # Running totals of weight tell us how many whole items fit.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
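# Example (illustrative values): items worth [60, 100, 120] with weights
# [10, 20, 30] and capacity 50 take the first two items whole plus 2/3 of
# the third, for 60 + 100 + 80 = 240.
#
#     >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
#     240.0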
if __name__ == "__main__":
import doctest
doctest.testmod()
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
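# The sequence grows doubly exponentially; the first terms (n = 1..5) are
# 2, 3, 7, 43, 1807, each term being the product of all previous terms plus 1.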
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_lowerCamelCase : Union[str, Any] = '''facebook/wmt19-en-de'''
_lowerCamelCase : Optional[int] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_lowerCamelCase : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_lowerCamelCase : Optional[int] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
_lowerCamelCase : Optional[Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_lowerCamelCase : Dict = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
_lowerCamelCase : Union[str, Any] = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : str = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[Any] = ''''''
_lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
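# What the checks above assert, in short (a sketch for two processes):
# process 0 holds a tensor of shape (2, 10) and process 1 one of shape (3, 10).
# After accelerator.pad_across_processes(...) both become (3, 10), with the
# missing row on process 0 filled with the pad value 0 (appended at the end by
# default, or prepended when pad_first=True).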
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
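# De-mangled usage sketch of the detection path exercised above. Hedged:
# the helper name and the empty annotation list are illustrative
# assumptions; the real assertions live in the slow test itself.
def _deta_processor_usage_sketch():
    from PIL import Image
    from transformers import DetaImageProcessor
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    target = {"image_id": 39_769, "annotations": []}  # COCO-format dicts go here
    processor = DetaImageProcessor()
    encoding = processor(images=image, annotations=target, return_tensors="pt")
    # encoding["pixel_values"] is (1, 3, H, W); encoding["labels"][0] carries the
    # rescaled boxes, areas, class labels, orig_size and size checked above.
    return encoding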
| 705
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Tuple = (KDPMaDiscreteScheduler,)
lowercase__ : Optional[int] = 10
def __snake_case( self : Optional[Any] , **_UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if str(_UpperCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
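# De-mangled sketch of the denoising loop the tests above drive. Hedged: the
# mangled scheduler alias is assumed to be diffusers' KDPM2DiscreteScheduler,
# and `model` stands in for any UNet-style callable taking (sample, timestep).
def _kdpm2_loop_sketch(model, sample, num_inference_steps=10):
    from diffusers import KDPM2DiscreteScheduler
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_100, beta_start=0.0001, beta_end=0.02)
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample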
| 647
| 0
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = False
def __lowerCamelCase (UpperCAmelCase__ : Namespace ):
return TrainCommand(UpperCAmelCase__ )
class lowercase ( a ):
@staticmethod
def __snake_case( _UpperCamelCase : ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=_UpperCamelCase , required=_UpperCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=_UpperCamelCase , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=_UpperCamelCase , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=_UpperCamelCase , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=_UpperCamelCase , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=_UpperCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=_UpperCamelCase , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=_UpperCamelCase , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=_UpperCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=_UpperCamelCase , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=_UpperCamelCase , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=_UpperCamelCase , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=_UpperCamelCase , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=_UpperCamelCase )
def __init__( self : Union[str, Any] , _UpperCamelCase : Namespace ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = logging.get_logger("transformers-cli/training" )
SCREAMING_SNAKE_CASE = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE = args.output
SCREAMING_SNAKE_CASE = args.column_label
SCREAMING_SNAKE_CASE = args.column_text
SCREAMING_SNAKE_CASE = args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE = None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE = args.validation_split
SCREAMING_SNAKE_CASE = args.train_batch_size
SCREAMING_SNAKE_CASE = args.valid_batch_size
SCREAMING_SNAKE_CASE = args.learning_rate
SCREAMING_SNAKE_CASE = args.adam_epsilon
def __snake_case( self : Dict ) -> Any:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
raise NotImplementedError
def __snake_case( self : List[Any] ) -> Dict:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
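# Programmatic usage sketch for the command above. Hedged: `TrainCommand` is
# the un-mangled class name (the factory above already references it), and the
# argument values are illustrative; construction loads the pipeline and CSVs.
def _train_command_usage_sketch():
    from argparse import Namespace
    args = Namespace(
        train_data="./train.csv", column_label=0, column_text=1, column_id=2,
        skip_first_row=True, validation_data="", validation_split=0.1,
        output="./trained/", task="text_classification", model="bert-base-uncased",
        train_batch_size=32, valid_batch_size=64, learning_rate=3e-5, adam_epsilon=1e-08,
    )
    return TrainCommand(args)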
| 706
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
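# Instantiation sketch. Hedged: ErnieMConfig is the assumed public name of the
# configuration class above (model_type "ernie_m"); keyword names match __init__.
def _ernie_m_config_sketch():
    from transformers import ErnieMConfig
    config = ErnieMConfig(vocab_size=250_002, hidden_size=768, num_hidden_layers=12)
    # "dropout" resolves through the attribute map above to "classifier_dropout".
    return config.hidden_size, config.max_position_embeddings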
| 647
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
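# What the _LazyModule wiring buys: importing the package is cheap, and the
# torch-backed classes in _import_structure only materialize on first attribute
# access. A usage sketch (class names match the lists above):
def _lazy_import_sketch():
    from transformers.models import gpt_bigcode
    config_cls = gpt_bigcode.GPTBigCodeConfig  # resolves configuration_gpt_bigcode
    model_cls = gpt_bigcode.GPTBigCodeModel    # resolves modeling_gpt_bigcode (needs torch)
    return config_cls, model_cls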
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 647
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str=7 , _UpperCamelCase : str=3 , _UpperCamelCase : Tuple=18 , _UpperCamelCase : List[Any]=30 , _UpperCamelCase : Tuple=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : str=None , _UpperCamelCase : Dict=True , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 20}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
SCREAMING_SNAKE_CASE = do_flip_channel_order
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = MobileViTImageProcessor if is_vision_available() else None
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTImageProcessingTester(self )
@property
def __snake_case( self : str ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_flip_channel_order" ) )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
pass
def __snake_case( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case( self : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 708
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class lowercase ( Generic[T] ):
def __init__( self : Any , _UpperCamelCase : T ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return F"{self.data}"
class lowercase ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
def __iter__( self : str ) -> Iterator[T]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.top
while node:
yield node.data
SCREAMING_SNAKE_CASE = node.next
def __str__( self : int ) -> str:
'''simple docstring'''
return "->".join([str(_UpperCamelCase ) for item in self] )
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __snake_case( self : Union[str, Any] ) -> bool:
'''simple docstring'''
return self.top is None
def __snake_case( self : str , _UpperCamelCase : T ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Node(_UpperCamelCase )
if not self.is_empty():
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = node
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = self.top.next
return pop_node.data
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __snake_case( self : Dict ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
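# A clean, self-contained sketch of the same linked-list stack. Hedged: the
# method names push/pop are assumptions for the repeated `__snake_case` defs
# above, which shadow one another as written.
def _stack_sketch_demo() -> list[int]:
    class _Node:
        def __init__(self, data, next_node=None):
            self.data, self.next = data, next_node
    class _Stack:
        def __init__(self):
            self.top = None
        def push(self, data):
            # the new node points at the old top, giving LIFO order
            self.top = _Node(data, self.top)
        def pop(self):
            if self.top is None:
                raise IndexError("pop from empty stack")
            node, self.top = self.top, self.top.next
            return node.data
    stack = _Stack()
    for value in (1, 2, 3):
        stack.push(value)
    return [stack.pop(), stack.pop(), stack.pop()]  # [3, 2, 1]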
if __name__ == "__main__":
from doctest import testmod
testmod()
| 647
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowercase ( a ):
lowercase__ : Any = """speech_to_text"""
lowercase__ : Tuple = ["""past_key_values"""]
lowercase__ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , _UpperCamelCase : Tuple=10_000 , _UpperCamelCase : Any=12 , _UpperCamelCase : Dict=2_048 , _UpperCamelCase : Optional[Any]=4 , _UpperCamelCase : Any=6 , _UpperCamelCase : Optional[Any]=2_048 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Optional[Any]=0.0 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : int=True , _UpperCamelCase : List[Any]="relu" , _UpperCamelCase : List[str]=256 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : str=0.0 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=1 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Optional[int]=6_000 , _UpperCamelCase : Dict=1_024 , _UpperCamelCase : Dict=2 , _UpperCamelCase : List[Any]=(5, 5) , _UpperCamelCase : str=1_024 , _UpperCamelCase : Dict=80 , _UpperCamelCase : int=1 , **_UpperCamelCase : Tuple , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE = max_source_positions
SCREAMING_SNAKE_CASE = max_target_positions
SCREAMING_SNAKE_CASE = num_conv_layers
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = conv_channels
SCREAMING_SNAKE_CASE = input_feat_per_channel
SCREAMING_SNAKE_CASE = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , is_encoder_decoder=_UpperCamelCase , decoder_start_token_id=_UpperCamelCase , **_UpperCamelCase , )
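# Sketch of the kernel-size consistency check enforced above. Hedged:
# Speech2TextConfig is the assumed public name (model_type "speech_to_text").
def _speech_to_text_config_sketch():
    from transformers import Speech2TextConfig
    ok = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
    try:
        Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))
    except ValueError:
        return ok.num_conv_layers  # mismatched lengths raise, as coded above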
| 709
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase : List[Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( a ):
lowercase__ : Any = ["""image_processor""", """tokenizer"""]
lowercase__ : Optional[Any] = """BlipImageProcessor"""
lowercase__ : Optional[int] = """AutoTokenizer"""
def __init__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = False
super().__init__(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self : Dict , _UpperCamelCase : ImageInput = None , _UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[bool, str, PaddingStrategy] = False , _UpperCamelCase : Union[bool, str, TruncationStrategy] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : int , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
SCREAMING_SNAKE_CASE = self.tokenizer
SCREAMING_SNAKE_CASE = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
return text_encoding
# add pixel_values
SCREAMING_SNAKE_CASE = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCamelCase )
return encoding_image_processor
def __snake_case( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : str , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
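# Illustrative call pattern for the combined processor above. Hedged: the
# checkpoint name is an assumption; any repo exposing this processor class works.
def _blip_processor_usage_sketch():
    from PIL import Image
    from transformers import AutoProcessor
    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    # both branches of __call__ above merge here: tokenizer fields plus pixel_values
    return sorted(inputs.keys())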
| 710
|
def __lowerCamelCase (UpperCAmelCase__ : list[int] ):
if not numbers:
return 0
if not isinstance(UpperCAmelCase__ , (list, tuple) ) or not all(
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = numbers[0]
for i in range(1 , len(UpperCAmelCase__ ) ):
# update the maximum and minimum subarray products
SCREAMING_SNAKE_CASE = numbers[i]
if number < 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = min_till_now, max_till_now
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , max_till_now * number )
SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ , min_till_now * number )
# update the maximum product found till now
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , UpperCAmelCase__ )
return max_prod
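# Worked example of the routine above (Kadane-style maximum product subarray):
# the running minimum is tracked next to the maximum so a negative factor can
# flip the smallest product into the new largest one. A clean, runnable
# restatement, since the mangled original references undefined names:
def _max_product_sketch(numbers: list) -> int:
    if not numbers:
        return 0
    max_prod = max_till_now = min_till_now = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod
if __name__ == "__main__":
    assert _max_product_sketch([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
    assert _max_product_sketch([-2, 0, -1]) == 0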
| 647
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __snake_case( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (32, 32)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
@property
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def __snake_case( self : Tuple ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __snake_case( self : List[Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_UpperCamelCase )
@property
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
def extract(*_UpperCamelCase : List[str] , **_UpperCamelCase : Dict ):
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.ones([0] )
def __snake_case( self : List[str] , _UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.pixel_values.to(_UpperCamelCase )
return self
return Out()
return extract
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = self.dummy_image.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = init_image / 2 + 0.5
        # make sure the PNDM scheduler skips the PRK steps here
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , safety_checker=_UpperCamelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = self.dummy_image.to(_UpperCamelCase )
# put models in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = vae.half()
SCREAMING_SNAKE_CASE = bert.half()
        # make sure the PNDM scheduler skips the PRK steps here
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , safety_checker=_UpperCamelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
        # resize to a resolution that is divisible by 8 but not by 16 or 32
SCREAMING_SNAKE_CASE = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE = "BAAI/AltDiffusion"
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCamelCase , safety_checker=_UpperCamelCase , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = "A fantasy landscape, trending on artstation"
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_UpperCamelCase , output_type="np" , )
SCREAMING_SNAKE_CASE = output.images[0]
SCREAMING_SNAKE_CASE = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __snake_case( self : Tuple ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
SCREAMING_SNAKE_CASE = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
SCREAMING_SNAKE_CASE = "BAAI/AltDiffusion"
SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCamelCase , safety_checker=_UpperCamelCase , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = "A fantasy landscape, trending on artstation"
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_UpperCamelCase , output_type="np" , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare the maximum absolute error here
assert np.abs(expected_image - image ).max() < 1e-2
| 711
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowerCamelCase : str = threading.Lock()
_lowerCamelCase : Optional[logging.Handler] = None
_lowerCamelCase : Any = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_lowerCamelCase : Union[str, Any] = logging.WARNING
_lowerCamelCase : List[Any] = True
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_VERBOSITY" , UpperCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def __lowerCamelCase ():
return __name__.split("." )[0]
def __lowerCamelCase ():
return logging.getLogger(_get_library_name() )
def __lowerCamelCase ():
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ():
return log_levels
def __lowerCamelCase (UpperCAmelCase__ : Optional[str] = None ):
if name is None:
SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __lowerCamelCase (UpperCAmelCase__ : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
return set_verbosity(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowerCamelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCAmelCase__ )
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ():
_configure_library_root_logger()
SCREAMING_SNAKE_CASE = True
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCAmelCase__ )
def __lowerCamelCase (self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ):
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , UpperCAmelCase__ )
if no_advisory_warnings:
return
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : str = warning_advice
@functools.lru_cache(UpperCAmelCase__ )
def __lowerCamelCase (self : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ):
self.warning(*UpperCAmelCase__ , **UpperCAmelCase__ )
_lowerCamelCase : Dict = warning_once
class lowercase :
def __init__( self : List[Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : str ) -> List[Any]: # pylint: disable=unused-argument
'''simple docstring'''
SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self : Optional[Any] ) -> str:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[str] , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
def empty_fn(*_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
return self
def __exit__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return
class lowercase :
def __call__( self : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_UpperCamelCase , **_UpperCamelCase )
else:
return EmptyTqdm(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Dict , *_UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCamelCase : Union[str, Any] = _tqdm_cls()
def __lowerCamelCase ():
global _tqdm_active
return bool(_tqdm_active )
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def __lowerCamelCase ():
global _tqdm_active
SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
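# Usage sketch of the public surface this module mirrors. Hedged: the names
# below are transformers' actual transformers.utils.logging API; the defs
# above are mangled to __lowerCamelCase and cannot be called directly.
def _logging_usage_sketch():
    from transformers.utils import logging as hf_logging
    hf_logging.set_verbosity(hf_logging.INFO)
    logger = hf_logging.get_logger("transformers-cli/demo")
    logger.info("visible at INFO verbosity and above")
    hf_logging.disable_progress_bars()  # flips the module-global tqdm switch
    return hf_logging.get_verbosity()  # 20 == logging.INFO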
| 647
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float , **_UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = padding_value
SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , _UpperCamelCase : Union[bool, str, PaddingStrategy] = True , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCamelCase ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(_UpperCamelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "pt"
elif isinstance(_UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(_UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE = to_numpy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = [to_numpy(_UpperCamelCase ) for v in value]
        # Convert padding_strategy to a PaddingStrategy member
SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=_UpperCamelCase , max_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if not all(len(_UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE = self._truncate(
_UpperCamelCase , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , truncation=_UpperCamelCase , )
truncated_inputs.append(_UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = {}
for i in range(_UpperCamelCase ):
# padding
SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE = []
                if value.dtype is np.dtype(np.float64 ):
                    SCREAMING_SNAKE_CASE = value.astype(np.float32 )
batch_outputs[key].append(_UpperCamelCase )
return BatchFeature(_UpperCamelCase , tensor_type=_UpperCamelCase )
    def _pad( self : Union[str, Any] , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            SCREAMING_SNAKE_CASE = np.ones(len(_UpperCamelCase ) , dtype=np.int32 )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = max_length - len(_UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
    def _truncate( self : Dict , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> Optional[int]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ) -> PaddingStrategy:
'''simple docstring'''
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
        return padding_strategy
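

# A minimal illustrative sketch (not part of the original class) of what the
# right-padding branch of ``_pad`` computes for a single 1-D feature sequence.
# The helper name and arguments below are hypothetical.
def _pad_right_sketch(values, max_length, padding_value=0.0):
    """Right-pad a 1-D sequence to ``max_length`` and build its attention mask."""
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    if difference > 0:
        attention_mask = np.pad(attention_mask, (0, difference))
        values = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return values, attention_mask


# Example: padding [1.0, 2.0, 3.0] to length 5 gives
# values [1., 2., 3., 0., 0.] and attention_mask [1, 1, 1, 0, 0].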
| 712
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class lowercase ( unittest.TestCase ):
    @cached_property
    def resolver( self ) -> TatoebaConverter:
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver_convert_models( self ) -> None:
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def test_resolver_write_model_card( self ) -> None:
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 647
| 0
|
"""Project Euler 31: in how many different ways can 2 pounds be made using any number of UK coins?"""


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 1_0) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 2_0) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 5_0) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_0_0) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_0_0) + one_pound(x)


def solution(x: int = 2_0_0) -> int:
    return two_pound(x)
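

# Sanity-check sketch (added for illustration, not part of the original
# solution): an independent iterative DP over the same UK coin set should
# agree with the recursive counters above; the published Project Euler 31
# answer for 200 pence is 73682.
def _count_ways_dp(target: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target  # one way to make 0 pence: use no coins
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert _count_ways_dp(2_0_0) == 73_682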
if __name__ == "__main__":
print(solution(int(input().strip())))
| 713
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
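

# Illustrative sketch (not part of the test suite): decoding the top-k class
# names from classification logits, using the same ``model.config.id2label``
# mapping the assertion above relies on. The helper name is hypothetical.
def _top_k_labels(logits, id2label, k=3):
    """Return the ``k`` most likely (label, probability) pairs for a 1-image batch."""
    probs = logits.softmax(dim=-1)[0]
    top_scores, top_ids = probs.topk(k)
    return [(id2label[i.item()], s.item()) for s, i in zip(top_scores, top_ids)]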
| 647
| 0
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : int = '''T5Config'''
class TFMTaModel ( TFTaModel ):
lowercase__ : Tuple = """mt5"""
lowercase__ : Tuple = MTaConfig
class TFMTaForConditionalGeneration ( TFTaForConditionalGeneration ):
lowercase__ : Optional[int] = """mt5"""
lowercase__ : List[Any] = MTaConfig
class TFMTaEncoderModel ( TFTaEncoderModel ):
lowercase__ : Union[str, Any] = """mt5"""
lowercase__ : List[Any] = MTaConfig
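

# Usage sketch (illustrative only): the three classes above are thin wrappers
# that reuse the T5 architectures with an mT5 config. With the upstream
# library they would be loaded as, e.g. (downloads weights):
#
#   from transformers import TFMT5Model
#   model = TFMT5Model.from_pretrained("google/mt5-small")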
| 714
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
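

# Worked example (added for clarity): a (left, top, right, bottom) box of
# (100, 50, 300, 150) on a 1000x500 page maps onto the 0-1000 scale that
# LayoutLM expects: normalize_box([100, 50, 300, 150], 1000, 500) == [100, 100, 300, 300].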
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] = None ):
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else ""
# apply OCR
SCREAMING_SNAKE_CASE = to_pil_image(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pil_image.size
SCREAMING_SNAKE_CASE = pytesseract.image_to_data(UpperCAmelCase__ , lang=UpperCAmelCase__ , output_type="dict" , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(UpperCAmelCase__ ) if not word.strip()]
SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase__ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowercase ( BaseImageProcessor ):
lowercase__ : Optional[int] = ["""pixel_values"""]
def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "" , **_UpperCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config
def __snake_case( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Any , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : str , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE = [flip_channel_order(_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE = words_batch
SCREAMING_SNAKE_CASE = boxes_batch
return data
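

# Usage sketch (illustrative, not part of the module): running the processor
# on a PIL image with OCR enabled. This needs Pillow plus the pytesseract soft
# dependency, and the file path below is hypothetical:
#
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor()  # upstream name of the class above
#   encoding = processor(Image.open("document.png"), return_tensors="np")
#   # ``encoding`` holds "pixel_values" plus the OCR "words" and "boxes"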
| 647
| 0
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the run id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword exposed by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last scheduled (daily) CI artifacts and return each contained file as text."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
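

# Illustrative invocation (hypothetical artifact name and paths; requires a
# GitHub token with read access to the Actions API):
#
#   token = os.environ["GITHUB_TOKEN"]
#   reports = get_last_daily_ci_reports(["ci_results"], "ci_outputs", token)
#   # ``reports`` maps artifact name -> {filename: decoded text content}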
| 715
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
| 647
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.3_144_598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed, in m/s, of a gas at ``temperature`` (K) with ``molar_mass`` (kg/mol)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen (N2) at 300 K; the molar mass must be in kg/mol,
    # so 28 g/mol is written as 0.028 here
    temperature = 3_00
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 716
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including ``num``, via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
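

# Quick sanity check (added for illustration): the primes up to 30.
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]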
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 717
|
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost of travel passes covering every day in ``days``."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 3_6_6:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 3_0),
        )

    return dynamic_programming(1)
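

# Worked example (added for illustration): with travel days [1, 4, 6, 7, 8, 20]
# and pass costs [2, 7, 15] (1-day / 7-day / 30-day), the cheapest plan costs
# 11: a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass
# on day 20.
assert minimum_tickets_cost([1, 4, 6, 7, 8, 2_0], [2, 7, 1_5]) == 1_1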
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
class lowercase ( PreTrainedTokenizer ):
lowercase__ : int = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Any="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : List[Any]="<mask>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Union[str, Any] , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __snake_case( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : List[str] ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __snake_case( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Tuple , _UpperCamelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
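        # e.g. an out-of-vocabulary piece makes PieceToId(...) return 0, which must map to
        # unk_token_id rather than to 0 + fairseq_offset.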
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : Dict , _UpperCamelCase : int ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : Dict , _UpperCamelCase : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 718
|
from __future__ import annotations
import math
def __lowerCamelCase (UpperCAmelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
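# Illustrative sanity checks: is_prime(2), is_prime(3) and is_prime(97) are True,
# while is_prime(1), is_prime(9) and is_prime(1_000) are False.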
_lowerCamelCase : Tuple = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def __lowerCamelCase (UpperCAmelCase__ : int ):
    if not isinstance(n , int ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
SCREAMING_SNAKE_CASE = []
for num in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = 0
while 2 * i * i <= odd_composites[num]:
SCREAMING_SNAKE_CASE = odd_composites[num] - 2 * i * i
            if is_prime(odd_composites[num] - 2 * i * i ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def __lowerCamelCase ():
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 647
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ : int = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ : Any = False
lowercase__ : Any = False
def __snake_case( self : Dict , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
                SCREAMING_SNAKE_CASE = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
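                # In the unobfuscated upstream test this zero tensor most likely fills the
                # `next_sentence_label` entry required by the pretraining head.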
return inputs_dict
class lowercase ( a ):
def __init__( self : str , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[Any]=7 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : int=True , _UpperCamelCase : int=True , _UpperCamelCase : Union[str, Any]=99 , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Any=32 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : int=37 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Optional[int]=512 , _UpperCamelCase : Any=16 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : str=None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = embedding_size
def __snake_case( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case( self : Any , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertModel(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = [input_ids, input_mask]
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertForMaskedLM(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertForNextSentencePrediction(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertForPreTraining(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case( self : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFMobileBertForSequenceClassification(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = TFMobileBertForMultipleChoice(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFMobileBertForTokenClassification(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertForQuestionAnswering(config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertModelTest.TFMobileBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Dict ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCamelCase )
def __snake_case( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCamelCase )
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCamelCase )
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCamelCase )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCamelCase )
@slow
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
SCREAMING_SNAKE_CASE = TFMobileBertModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = [1, 6, 30_522]
self.assertEqual(output.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 )
| 719
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 647
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ):
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
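# For layer 0 this yields pairs such as
# ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight").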
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ""
else:
SCREAMING_SNAKE_CASE = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
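        # The fused qkv weight has shape (3 * hidden_size, hidden_size); the three slices
        # above carve out query, key and value projections of (hidden_size, hidden_size) each.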
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : Any ):
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
SCREAMING_SNAKE_CASE = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = ViTMSNConfig()
SCREAMING_SNAKE_CASE = 1_0_0_0
SCREAMING_SNAKE_CASE = "datasets/huggingface/label-files"
SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 3_8_4
SCREAMING_SNAKE_CASE = 1_5_3_6
SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = ViTMSNModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["target_encoder"]
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , base_model=UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , base_model=UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw )
SCREAMING_SNAKE_CASE = ViTImageProcessor(
size=config.image_size , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
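# Example invocation (script name and output path are illustrative):
# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small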
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 720
|
| 647
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    _lowerCamelCase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    _lowerCamelCase : Any = [0, 25, 50]
    _lowerCamelCase : List[Any] = [25, 50, 75]
    _lowerCamelCase : Tuple = fuzz.membership.trimf(X, abc1)
    _lowerCamelCase : Tuple = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    _lowerCamelCase : Optional[Any] = np.ones(75)
    _lowerCamelCase : Union[str, Any] = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    _lowerCamelCase : Any = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    _lowerCamelCase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    _lowerCamelCase : List[Any] = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    _lowerCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    _lowerCamelCase : Dict = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    _lowerCamelCase : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    _lowerCamelCase : List[str] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    _lowerCamelCase : int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
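    # A minimal sketch of both compositions on two small illustrative relations
    # (R and S are made-up 2x2 matrices, not derived from the fuzzy sets above):
    R = np.array([[0.2, 0.8], [0.6, 0.4]])
    S = np.array([[0.5, 0.9], [0.7, 0.3]])
    # max-min: T[i, k] = max over j of min(R[i, j], S[j, k])
    max_min_T = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    # max-product: the elementwise minimum is replaced by a product
    max_prod_T = np.max(R[:, :, None] * S[None, :, :], axis=1)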
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('''Young''')
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('''Middle aged''')
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('''union''')
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('''intersection''')
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('''complement_a''')
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('''difference a/b''')
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('''alg_sum''')
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('''alg_product''')
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('''bdd_sum''')
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('''bdd_difference''')
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 721
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
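        # e.g. spm id 3 (",") maps to 3 + 12 = 15: ids 0-4 hold the special tokens above
        # and ids 5-14 hold the ten [unused] slots.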
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 647
| 0
|
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
SCREAMING_SNAKE_CASE = (boundary[1] - boundary[0]) / steps
SCREAMING_SNAKE_CASE = boundary[0]
SCREAMING_SNAKE_CASE = boundary[1]
    SCREAMING_SNAKE_CASE = make_points(a , b , h )
SCREAMING_SNAKE_CASE = 0.0
    y += (h / 2.0) * f(a )
for i in x_i:
# print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
return y
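# With f(x) = x * x on [0, 1] and 10 steps, main() below reports y ~= 0.335,
# close to the exact integral 1/3; the composite rule's error shrinks as O(h^2).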
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = a + h
while x < (b - h):
yield x
SCREAMING_SNAKE_CASE = x + h
def __lowerCamelCase (UpperCAmelCase__ : Any ): # enter your function here
SCREAMING_SNAKE_CASE = (x - 0) * (x - 0)
return y
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = 0.0 # Lower bound of integration
SCREAMING_SNAKE_CASE = 1.0 # Upper bound of integration
SCREAMING_SNAKE_CASE = 10.0 # define number of steps or resolution
SCREAMING_SNAKE_CASE = [a, b] # define boundary of integration
    SCREAMING_SNAKE_CASE = method_a(boundary , steps )
print(F"y = {y}" )
if __name__ == "__main__":
main()
| 700
|
import numpy as np
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float = 1e-12 , UpperCAmelCase__ : int = 1_0_0 , ):
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCAmelCase__ ) == np.iscomplexobj(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.iscomplexobj(UpperCAmelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCAmelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
# Multiple matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(UpperCAmelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
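# Power iteration converges to the dominant eigenpair at a rate governed by
# |lambda_2 / lambda_1|, the ratio of the two largest-magnitude eigenvalues.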
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] )
    SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complex128 )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(UpperCAmelCase__ , UpperCAmelCase__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(UpperCAmelCase__ )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 647
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowercase ( a ):
@staticmethod
def __snake_case( _UpperCamelCase : ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=_UpperCamelCase , default=_UpperCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=_UpperCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=_UpperCamelCase )
def __init__( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : bool ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = cache
SCREAMING_SNAKE_CASE = force
SCREAMING_SNAKE_CASE = trust_remote_code
def __snake_case( self : Any ) -> int:
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
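# Typical CLI usage (model name illustrative), wired through transformers-cli:
#   transformers-cli download --cache-dir ./models bert-base-uncased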
| 701
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
                 fft_window_size=1_024, padding_value=0.0, return_attention_mask=False,
                 frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
                 truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size,
            hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
                 truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None,
                 sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                 **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
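
# Usage sketch (hedged: the public class name and the exact output shape are
# assumptions, not guaranteed by this file):
#   import numpy as np
#   extractor = ClapFeatureExtractor()
#   audio = np.zeros(48_000, dtype=np.float64)   # one second of silence at 48 kHz
#   features = extractor(audio, sampling_rate=48_000, return_tensors="np")
#   # "fusion" truncation stacks 4 mel views per sample, so input_features is
#   # roughly (batch, 4, frames, 64) and is_longer is (batch, 1).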
| 647
| 0
|
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, None))
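
# Quick sanity sketch of the 6k +/- 1 fact relied on in is_prime above: every
# integer is one of 6k, 6k+1, ..., 6k+5; 6k, 6k+2 and 6k+4 are even, and 6k+3
# is divisible by 3, so any prime > 3 must be of the form 6k +/- 1.
def _check_six_k_form(limit: int = 1_000) -> bool:
    return all(p % 6 in (1, 5) for p in range(5, limit) if is_prime(p))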
if __name__ == "__main__":
print(f"""{solution() = }""")
| 702
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir="." , finetune_bert=UpperCAmelCase__ , large=UpperCAmelCase__ , share_emb=UpperCAmelCase__ , use_bert_emb=UpperCAmelCase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , lambda UpperCAmelCase__ , UpperCAmelCase__ : storage )
SCREAMING_SNAKE_CASE = AbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) , UpperCAmelCase__ )
original.eval()
SCREAMING_SNAKE_CASE = BertAbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE = encoder_input_ids
SCREAMING_SNAKE_CASE = decoder_input_ids
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE = original(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = original.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = new_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = new_model.generator(UpperCAmelCase__ )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))
SCREAMING_SNAKE_CASE = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
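
# Generic sketch of the "copy weights, then compare forward passes" pattern used
# above (an illustration, not part of the conversion script; assumes two torch
# modules whose parameter names, shapes, and call signatures match):
def _transfer_and_verify(src, dst, example, atol=1e-3):
    dst.load_state_dict(src.state_dict())
    src.eval()
    dst.eval()
    with torch.no_grad():
        # identical weights should yield (near-)identical outputs
        return torch.allclose(src(example), dst(example), atol=atol)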
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 647
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {'''vocab_file''': '''sentencepiece.model'''}
_lowerCamelCase : Any = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
_lowerCamelCase : List[str] = {
'''google/rembert''': 2_56,
}
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , _UpperCamelCase : str , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : List[Any]="[SEP]" , _UpperCamelCase : Dict="[UNK]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Union[str, Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : Optional[int]="[MASK]" , **_UpperCamelCase : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor()
self.sp_model.Load(_UpperCamelCase )
@property
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : Optional[Any] , _UpperCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __snake_case( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(_UpperCamelCase )
return pieces
def __snake_case( self : Dict , _UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
return self.sp_model.PieceToId(_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
return self.sp_model.IdToPiece(_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.sp_model.decode_pieces(_UpperCamelCase )
return out_string
def __snake_case( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error("Vocabulary path ({}) should be a directory".format(_UpperCamelCase ) )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
return (out_vocab_file,)
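
# Layout sketch for the special-token helpers above (token strings shown for
# readability; real inputs are ids):
#   single sequence:  [CLS] A [SEP]            -> token type ids: 0 0 0
#   sequence pair:    [CLS] A [SEP] B [SEP]    -> token type ids: 0 0 0 1 1
# get_special_tokens_mask marks exactly the [CLS]/[SEP] positions with 1.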
| 703
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
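
# Worked values of the recurrence above, a(n) = a(n-1)**2 - a(n-1) + 1,
# i.e. lower * upper + 1 with lower = a(n-1) - 1 and upper = a(n-1):
#   a(1) = 2, a(2) = 2*1 + 1 = 3, a(3) = 3*2 + 1 = 7,
#   a(4) = 7*6 + 1 = 43, a(5) = 43*42 + 1 = 1807.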
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 647
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 704
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
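
    # What pad_across_processes does here (hedged summary): each rank holds a
    # tensor with process_index + 2 rows, so padding equalizes dim 0 across all
    # ranks to the global maximum (num_processes + 1 rows), filling with zeros
    # at the end by default or at the front with pad_first=True, which is
    # exactly what the checks above verify.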
| 647
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
a , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class lowercase ( a ):
lowercase__ : Optional[int] = False
lowercase__ : int = ClassificationFunction.NONE
def __init__( self : Optional[Any] , **_UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __snake_case( self : int , _UpperCamelCase : List[str]=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : str="" , **_UpperCamelCase : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , _UpperCamelCase , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Any , *_UpperCamelCase : List[str] , **_UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = super().__call__(*_UpperCamelCase , **_UpperCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = "top_k" not in kwargs
if isinstance(args[0] , _UpperCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , **_UpperCamelCase : List[Any] ) -> Dict[str, GenericTensor]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.framework
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return self.tokenizer(**_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) == 1 and isinstance(inputs[0] , _UpperCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.model(**_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Any=True ) -> Tuple:
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs["logits"][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
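
# Numeric sketch of the scoring in postprocess() above (illustrative logits):
#   logits = np.array([1.0, 2.0, 0.5])
#   softmax(logits) -> approx [0.2312, 0.6285, 0.1402]
# argmax picks index 1, matching the legacy top_k == 1 path, which returns a
# single {"label", "score"} dict via id2label.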
| 705
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
def __snake_case( self : Optional[Any] , **_UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if str(_UpperCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
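
# The denoising loop pattern exercised by the tests above, in isolation (a
# hedged sketch; any diffusers scheduler exposing set_timesteps /
# scale_model_input / step should fit):
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample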
| 647
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConverterTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 706
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
| 647
| 0
|
_lowerCamelCase : Optional[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 647
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase : Tuple = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 708
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented with a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
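
# Usage sketch for the linked-list stack above:
#   stack = LinkedStack[int]()
#   stack.push(1); stack.push(2)
#   str(stack)   -> "2->1"
#   stack.pop()  -> 2
#   stack.peek() -> 1
#   len(stack)   -> 1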
if __name__ == "__main__":
from doctest import testmod
testmod()
| 647
| 0
|
_lowerCamelCase : int = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
_lowerCamelCase : List[Any] = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
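
# Round-trip sketch using the tables above (note that this table's codes for
# 'j' and 'v' deviate from the classical Baconian assignments):
#   encode("hello")         -> "AABBBAABAAABABAABABAABBAB"
#   decode(encode("hello")) -> "hello"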
if __name__ == "__main__":
from doctest import testmod
testmod()
| 709
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647
| 0
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
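
# Worked example for the size=2, stride=2 case (values chosen by hand):
#   arr = [[ 1,  2,  3,  4],
#          [ 5,  6,  7,  8],
#          [ 9, 10, 11, 12],
#          [13, 14, 15, 16]]
#   maxpooling(arr, size=2, stride=2) -> [[ 6.,  8.], [14., 16.]]
#   avgpooling(arr, size=2, stride=2) -> [[ 3.,  5.], [11., 13.]]  (int-truncated)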
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 710
|
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
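
# Worked trace for max_product_subarray([2, 3, -2, 4]):
#   start: max_till_now = min_till_now = max_prod = 2
#   i=1 (3):  max=6, min=3, max_prod=6
#   i=2 (-2): swap -> max=3, min=6; then max=max(-2, -6)=-2, min=min(-2, -12)=-12
#   i=3 (4):  max=max(4, -8)=4, min=min(4, -48)=-48, max_prod stays 6
# result: 6, from the subarray [2, 3].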
| 647
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
_lowerCamelCase : Tuple = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
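# Added usage note (hedged; the script location is an assumption): run the
# validator against a dataset card, e.g.
#     python validate_readme_yaml.py path/to/README.md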
| 711
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new
    default level. If it is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                F"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique call (deduplicated via lru_cache)."""
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return empty function."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
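# Added usage sketch (hedged; mirrors the public `transformers.utils.logging` API
# these helpers implement):
#     from transformers.utils import logging
#     logging.set_verbosity_info()              # equivalent to set_verbosity(INFO)
#     logger = logging.get_logger("transformers.modeling_utils")
#     logger.info("now visible at INFO level")
#     logging.disable_progress_bar()            # routes tqdm through EmptyTqdm above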
| 647
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[int] = ["""pixel_values"""]
def __init__( self : List[str] , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Dict[str, int]] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name="crop_size" )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __snake_case( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : str , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE = get_resize_output_image_size(_UpperCamelCase , size=size["shortest_edge"] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : str , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_UpperCamelCase , size=(size["height"], size["width"]) , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Dict , _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Dict ) -> np.ndarray:
'''simple docstring'''
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : int = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[float] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_UpperCamelCase : Any , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase , param_name="crop_size" , default_to_square=_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
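# Added usage sketch (hedged; the class name above is obfuscated, so `processor`
# stands for an instance of it): preprocessing runs resize -> center-crop ->
# rescale -> normalize and returns a BatchFeature of pixel values.
#     batch = processor(images=pil_image, return_tensors="np")
#     batch["pixel_values"].shape   # (num_images, 3, 224, 224)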
| 712
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 647
| 0
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file, using the matching pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the version pinned in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = F"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(F"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(F"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(F"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
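# Added usage note (hedged; the utility path is an assumption):
#     python utils/release.py                  # minor release (add --patch for a patch)
#     python utils/release.py --post_release   # bump back to a .dev0 version afterwards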
| 713
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
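        # Added note (illustrative): loading with device_map="auto" lets accelerate
        # place and offload the weights, and its hooks move inputs to the right
        # device during forward, which is why the inputs above are not moved manually.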
| 647
| 0
|
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the global min and max to the two ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
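# Added check (illustrative): extracting the global min and max on each pass sorts
# the list from both ends inward, e.g.
#     >>> merge_sort([0, 5, 3, 2, 2])
#     [0, 2, 2, 3, 5]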
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
| 714
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
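# Added worked example (illustrative): boxes are rescaled to the 0-1000 range that
# LayoutLM-style models expect. For a 200x100 (width x height) image, the pixel box
# [20, 10, 100, 50] becomes [100, 100, 500, 500].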
class lowercase ( a ):
lowercase__ : Optional[int] = ["""pixel_values"""]
def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "" , **_UpperCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config
def __snake_case( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Any , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : str , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE = [flip_channel_order(_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE = words_batch
SCREAMING_SNAKE_CASE = boxes_batch
return data
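# Added usage sketch (hedged; the class name above is obfuscated, so `processor`
# stands for an instance of it): with apply_ocr=True the processor runs Tesseract
# and attaches the OCR output alongside the pixel values.
#     encoding = processor(images=document_image, return_tensors="pt")
#     encoding["pixel_values"], encoding["words"], encoding["boxes"]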
| 647
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaPipeline
    params = [
        """image_embeds""",
        """negative_image_embeds""",
    ]
    batch_params = ["""image_embeds""", """negative_image_embeds"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ) -> int:
'''simple docstring'''
return 32
@property
    def time_input_dim( self ) -> int:
'''simple docstring'''
return 32
@property
    def block_out_channels_a( self ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim( self ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ) -> int:
'''simple docstring'''
return 100
@property
    def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy")
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
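        # Added note (illustrative): Kandinsky 2.2 is a two-stage system; the prior
        # pipeline maps the text prompt to CLIP image embeddings (plus negative
        # embeddings for classifier-free guidance), and the decoder UNet tested here
        # turns those embeddings into pixels, which is why this pipeline consumes
        # image_embeds instead of a raw prompt.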
| 715
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
| 647
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowerCamelCase : Optional[int] = (7_20, 12_80) # Height, Width
_lowerCamelCase : str = (0.4, 0.6) # if a box's height or width is lower than this scale, drop it.
_lowerCamelCase : List[Any] = 1 / 1_00
_lowerCamelCase : int = ''''''
_lowerCamelCase : List[Any] = ''''''
_lowerCamelCase : Dict = ''''''
_lowerCamelCase : Optional[Any] = 2_50
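# Build NUMBER_IMAGES mosaic images: each output stitches four randomly chosen
# source images onto one canvas and rewrites their boxes as YOLO-format
# annotations (class x_center y_center width height, all relative to the canvas).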
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
for index in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = random.sample(range(len(UpperCAmelCase__ ) ) , 4 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = update_image_and_anno(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , filter_scale=UpperCAmelCase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE = random_chars(3_2 )
SCREAMING_SNAKE_CASE = path.split(os.sep )[-1].rsplit("." , 1 )[0]
SCREAMING_SNAKE_CASE = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" , UpperCAmelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
SCREAMING_SNAKE_CASE = []
for anno in new_annos:
SCREAMING_SNAKE_CASE = anno[3] - anno[1]
SCREAMING_SNAKE_CASE = anno[4] - anno[2]
SCREAMING_SNAKE_CASE = anno[1] + width / 2
SCREAMING_SNAKE_CASE = anno[2] + height / 2
SCREAMING_SNAKE_CASE = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(UpperCAmelCase__ )
with open(F"{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(annos_list ) )
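# Read YOLO-format label files (class x_center y_center width height), convert
# each box to corner coordinates [class, xmin, ymin, xmax, ymax], and return
# the matching image paths and boxes; images without any box are skipped.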
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for label_file in glob.glob(os.path.join(UpperCAmelCase__ , "*.txt" ) ):
SCREAMING_SNAKE_CASE = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(UpperCAmelCase__ ) as in_file:
SCREAMING_SNAKE_CASE = in_file.readlines()
SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , F"{label_name}.jpg" )
SCREAMING_SNAKE_CASE = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE = obj_list.rstrip("\n" ).split(" " )
SCREAMING_SNAKE_CASE = float(obj[1] ) - float(obj[3] ) / 2
SCREAMING_SNAKE_CASE = float(obj[2] ) - float(obj[4] ) / 2
SCREAMING_SNAKE_CASE = float(obj[1] ) + float(obj[3] ) / 2
SCREAMING_SNAKE_CASE = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCAmelCase__ )
labels.append(UpperCAmelCase__ )
return img_paths, labels
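# Mosaic augmentation: pick a split point inside `scale_range`, resize each of
# the four source images into one quadrant of the output canvas, and map its
# boxes into the mosaic's relative coordinates. Boxes whose width or height
# fall below `filter_scale` are dropped afterwards.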
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : list , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[float, float] , UpperCAmelCase__ : float = 0.0 , ):
SCREAMING_SNAKE_CASE = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE = int(scale_x * output_size[1] )
SCREAMING_SNAKE_CASE = int(scale_y * output_size[0] )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, index in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = all_img_list[index]
path_list.append(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = all_annos[index]
SCREAMING_SNAKE_CASE = cva.imread(UpperCAmelCase__ )
if i == 0: # top-left
SCREAMING_SNAKE_CASE = cva.resize(UpperCAmelCase__ , (divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE = bbox[1] * scale_x
SCREAMING_SNAKE_CASE = bbox[2] * scale_y
SCREAMING_SNAKE_CASE = bbox[3] * scale_x
SCREAMING_SNAKE_CASE = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
SCREAMING_SNAKE_CASE = cva.resize(UpperCAmelCase__ , (output_size[1] - divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE = bbox[2] * scale_y
SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
SCREAMING_SNAKE_CASE = cva.resize(UpperCAmelCase__ , (divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE = bbox[1] * scale_x
SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE = bbox[3] * scale_x
SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
SCREAMING_SNAKE_CASE = cva.resize(
UpperCAmelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
SCREAMING_SNAKE_CASE = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
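# Random alphanumeric suffix used to make each mosaic filename unique.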
def __lowerCamelCase (UpperCAmelCase__ : int ):
assert number_char > 1, "The number of characters should be greater than 1"
SCREAMING_SNAKE_CASE = ascii_lowercase + digits
return "".join(random.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 716
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float , **_UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = padding_value
SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , _UpperCamelCase : Union[bool, str, PaddingStrategy] = True , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
'''simple docstring'''
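# Normalize the input to a dict of per-key lists, convert values to NumPy,
# truncate each example, resolve the target length (longest/max_length), then
# pad every example and regroup per key into a BatchFeature of `return_tensors`.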
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCamelCase ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(_UpperCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "pt"
elif isinstance(_UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(_UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE = to_numpy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = [to_numpy(_UpperCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=_UpperCamelCase , max_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if not all(len(_UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE = self._truncate(
_UpperCamelCase , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , truncation=_UpperCamelCase , )
truncated_inputs.append(_UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = {}
for i in range(_UpperCamelCase ):
# padding
SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE = []
if value.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = value.astype(np.floataa )
batch_outputs[key].append(_UpperCamelCase )
return BatchFeature(_UpperCamelCase , tensor_type=_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
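# Pad a single example on the configured side, creating or extending the
# attention mask so that padded positions are marked with 0.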
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
SCREAMING_SNAKE_CASE = np.ones(len(_UpperCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = max_length - len(_UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def __snake_case( self : Dict , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> Optional[int]:
'''simple docstring'''
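# Truncate a single example (and its attention mask) to `max_length`, first
# rounding `max_length` up to a multiple of `pad_to_multiple_of` when set.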
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def __snake_case( self : Optional[Any] , _UpperCamelCase : int=False , _UpperCamelCase : Tuple=None ) -> Tuple:
'''simple docstring'''
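# Resolve the user-facing `padding` argument (bool, str, or PaddingStrategy)
# into a PaddingStrategy, validating that `max_length` and a padding value are
# available when the chosen strategy requires them.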
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = PaddingStrategy(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = padding
else:
SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
| 647
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
SCREAMING_SNAKE_CASE = TapasConfig.from_json_file(UpperCAmelCase__ )
# set absolute/relative position embeddings parameter
SCREAMING_SNAKE_CASE = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=UpperCAmelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = True
# hparam_utils.py hparams
SCREAMING_SNAKE_CASE = 0.66_4694
SCREAMING_SNAKE_CASE = 0.20_7951
SCREAMING_SNAKE_CASE = 0.12_1194
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0.035_2513
SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=UpperCAmelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = False
# hparam_utils.py hparams
SCREAMING_SNAKE_CASE = 36.4519
SCREAMING_SNAKE_CASE = 0.90_3421
SCREAMING_SNAKE_CASE = 222.088
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 0.76_3141
SCREAMING_SNAKE_CASE = TapasForQuestionAnswering(config=UpperCAmelCase__ )
elif task == "TABFACT":
SCREAMING_SNAKE_CASE = TapasForSequenceClassification(config=UpperCAmelCase__ )
elif task == "MLM":
SCREAMING_SNAKE_CASE = TapasForMaskedLM(config=UpperCAmelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
SCREAMING_SNAKE_CASE = TapasModel(config=UpperCAmelCase__ )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCAmelCase__ )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
SCREAMING_SNAKE_CASE = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + "vocab.txt" , model_max_length=5_1_2 )
tokenizer.save_pretrained(UpperCAmelCase__ )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to False.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 717
|
import functools
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ):
# Validation
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(UpperCAmelCase__ ) != 3 or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE = set(UpperCAmelCase__ )
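# Minimum-cost travel passes: from each day we either skip it (no travel) or
# buy a 1-day, 7-day, or 30-day pass and jump ahead accordingly.
# e.g. days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11.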
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Optional[Any] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : int = {
'''gpt-neox-20b''': 20_48,
}
class lowercase ( a ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]="<|endoftext|>" , _UpperCamelCase : Tuple="<|endoftext|>" , _UpperCamelCase : Union[str, Any]="<|endoftext|>" , _UpperCamelCase : Union[str, Any]=False , **_UpperCamelCase : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = add_prefix_space
def __snake_case( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
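# Build input ids for a conversation: encode each turn, append the EOS token
# after every turn, and keep only the most recent `model_max_length` tokens.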
def __snake_case( self : List[Any] , _UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 718
|
from __future__ import annotations
import math
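# Deterministic trial-division primality test: handle 2 and 3 directly, reject
# multiples of 2 and 3, then test divisors of the form 6k +/- 1 up to sqrt(n).
# e.g. is_prime(29) -> True, is_prime(87) -> False (87 = 3 * 29).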
def __lowerCamelCase (UpperCAmelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_lowerCamelCase : Tuple = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
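# Goldbach's other conjecture (Project Euler 46): find the odd composite
# numbers that cannot be written as a prime plus twice a square.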
def __lowerCamelCase (UpperCAmelCase__ : int ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
SCREAMING_SNAKE_CASE = []
for num in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = 0
while 2 * i * i <= odd_composites[num]:
SCREAMING_SNAKE_CASE = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def __lowerCamelCase ():
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 647
| 0
|
import functools
def __lowerCamelCase (UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(UpperCAmelCase__ ) != 3 or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE = set(UpperCAmelCase__ )
@functools.cache
def dynamic_programming(UpperCAmelCase__ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 647
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """mra"""
def __init__( self : Optional[Any] , _UpperCamelCase : Union[str, Any]=50_265 , _UpperCamelCase : Dict=768 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : str=3_072 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Tuple=512 , _UpperCamelCase : Tuple=1 , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : Tuple=1e-5 , _UpperCamelCase : List[Any]="absolute" , _UpperCamelCase : Any=4 , _UpperCamelCase : str="full" , _UpperCamelCase : List[str]=0 , _UpperCamelCase : Any=0 , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=2 , **_UpperCamelCase : str , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = block_per_row
SCREAMING_SNAKE_CASE = approx_mode
SCREAMING_SNAKE_CASE = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE = initial_prior_diagonal_n_blocks
| 720
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
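# Map parameter names from the original MSN checkpoint (``module.*``) to the
# Hugging Face ViT naming scheme; for a base model the ``vit.`` prefix is
# stripped again, otherwise a classification head is mapped as well.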
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ):
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ""
else:
SCREAMING_SNAKE_CASE = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : Any ):
# The projection head is used during self-supervised pre-training in MSN;
# it is not needed for downstream tasks.
SCREAMING_SNAKE_CASE = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = val
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = ViTMSNConfig()
SCREAMING_SNAKE_CASE = 1_0_0_0
SCREAMING_SNAKE_CASE = "datasets/huggingface/label-files"
SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 3_8_4
SCREAMING_SNAKE_CASE = 1_5_3_6
SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = 1_0_2_4
SCREAMING_SNAKE_CASE = 4_0_9_6
SCREAMING_SNAKE_CASE = 2_4
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = ViTMSNModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["target_encoder"]
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , base_model=UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , base_model=UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw )
SCREAMING_SNAKE_CASE = ViTImageProcessor(
size=config.image_size , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 647
| 0
|
class lowercase ( a ):
pass
class lowercase ( a ):
pass
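# Fixed-priority queue: three FIFO buckets indexed by priority (0 is served
# first); each bucket holds at most 100 items.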
class lowercase :
def __init__( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [
[],
[],
[],
]
def __snake_case( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : int ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(_UpperCamelCase )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self : Dict ) -> str:
'''simple docstring'''
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
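# Element priority queue: the smallest stored value is always dequeued first.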
class lowercase :
def __init__( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
def __snake_case( self : Optional[int] , _UpperCamelCase : int ) -> None:
'''simple docstring'''
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(_UpperCamelCase )
def __snake_case( self : Any ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
SCREAMING_SNAKE_CASE = min(self.queue )
self.queue.remove(_UpperCamelCase )
return data
def __str__( self : List[Any] ) -> str:
'''simple docstring'''
return str(self.queue )
def __lowerCamelCase():
SCREAMING_SNAKE_CASE = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(UpperCAmelCase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(UpperCAmelCase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __lowerCamelCase():
SCREAMING_SNAKE_CASE = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(UpperCAmelCase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(UpperCAmelCase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 721
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
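# Load a plain-text vocab file (one token per line) into an ordered
# token -> index mapping.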
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 647
| 0
|
from __future__ import annotations
import numpy as np
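# Rectified Linear Unit: element-wise max(0, x).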
def __lowerCamelCase (UpperCAmelCase__ : list[float] ):
return np.maximum(0 , UpperCAmelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 700
|
import numpy as np
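# Power iteration: repeatedly multiply a vector by the matrix and re-normalize;
# the vector converges to the eigenvector of the dominant (largest-magnitude)
# eigenvalue, and the Rayleigh quotient estimates that eigenvalue.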
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float = 1e-12 , UpperCAmelCase__ : int = 1_0_0 , ):
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCAmelCase__ ) == np.iscomplexobj(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.iscomplexobj(UpperCAmelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCAmelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to the next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
# Multiply the matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(UpperCAmelCase__ )
# Find the Rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] )
SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(UpperCAmelCase__ , UpperCAmelCase__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(UpperCAmelCase__ )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
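    # A minimal extra sketch (assumed inputs): the dominant eigenvalue of this
    # small symmetric matrix is 3.0, and power iteration should recover it.
    demo_matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
    demo_vector = np.array([1.0, 0.0])
    demo_value, _ = power_iteration(demo_matrix, demo_vector)
    assert abs(demo_value - 3.0) <= 1e-6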
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Any=13 , _UpperCamelCase : str=32 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : int=4 , _UpperCamelCase : Dict=[10, 20, 30, 40] , _UpperCamelCase : int=[2, 2, 3, 2] , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : Tuple=["stage2", "stage3", "stage4"] , _UpperCamelCase : Union[str, Any]=[2, 3, 4] , _UpperCamelCase : int=None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_stages
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = out_features
SCREAMING_SNAKE_CASE = out_indices
SCREAMING_SNAKE_CASE = scope
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case( self : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case( self : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = ConvNextBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Union[str, Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[Any] = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Any = True
lowercase__ : str = False
lowercase__ : List[Any] = False
lowercase__ : List[str] = False
lowercase__ : List[str] = False
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def __snake_case( self : int ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def __snake_case( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ConvNextModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : Dict ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def __snake_case( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase , a ):
lowercase__ : Any = (ConvNextBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = ConvNextConfig
lowercase__ : Union[str, Any] = False
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConvNextModelTester(self )
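# Shape arithmetic behind the resolution checks above (a sketch): ConvNext's
# 4x4 patch stem and three 2x downsampling stages divide the input resolution
# by 4, 8, 16 and 32 at the four successive stages.
assert [224 // s for s in (4, 8, 16, 32)] == [56, 28, 14, 7]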
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : str , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Any=48_000 , _UpperCamelCase : Optional[Any]=480 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Any=1_024 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 14_000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = top_db
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = fft_window_size
SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = max_length_s
SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = frequency_min
SCREAMING_SNAKE_CASE = frequency_max
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale="htk" , )
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case( self : str ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = spectrogram(
_UpperCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
_UpperCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_UpperCamelCase )
SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - max_length
SCREAMING_SNAKE_CASE = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE = False
else:
SCREAMING_SNAKE_CASE = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE = False
# Only use "repeat" as a new possible value for padding; the audio is repeated before applying the usual max_length padding.
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Tuple , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = np.asarray(raw_speech , dtype=np.float64 )
elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
SCREAMING_SNAKE_CASE = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE = [
self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCamelCase )
is_longer.append(_UpperCamelCase )
if truncation == "fusion" and sum(_UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE = np.random.randint(0 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE = BatchFeature(_UpperCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(_UpperCamelCase )
return input_features
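# Standalone sketch of the "repeatpad" branch above (assumed shapes; numpy is
# already imported at the top of this module): a short waveform is tiled to
# fill max_length and then zero-padded for any remainder.
def _repeatpad_sketch(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)


assert _repeatpad_sketch(np.ones(3), 8).tolist() == [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]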
def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
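    # A second, hand-checkable pair (a quick extra sketch): the classic example
    # "ABCBDAB" vs "BDCABA" has a longest common subsequence of length 4.
    ln2, _ = longest_common_subsequence("ABCBDAB", "BDCABA")
    assert ln2 == 4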
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir="." , finetune_bert=UpperCAmelCase__ , large=UpperCAmelCase__ , share_emb=UpperCAmelCase__ , use_bert_emb=UpperCAmelCase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , lambda UpperCAmelCase__ , UpperCAmelCase__ : storage )
SCREAMING_SNAKE_CASE = AbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) , UpperCAmelCase__ )
original.eval()
SCREAMING_SNAKE_CASE = BertAbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE = encoder_input_ids
SCREAMING_SNAKE_CASE = decoder_input_ids
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE = original(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = original.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = new_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = new_model.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : List[Any] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple ):
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
else:
SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE = hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE = name.split(UpperCAmelCase__ )[0].split("." )[-2]
SCREAMING_SNAKE_CASE = mapped_key.replace("*" , UpperCAmelCase__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
SCREAMING_SNAKE_CASE = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE = "weight"
else:
SCREAMING_SNAKE_CASE = None
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
SCREAMING_SNAKE_CASE = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE = name.split("." )
SCREAMING_SNAKE_CASE = int(items[0] )
SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int=None ):
# load the pre-trained checkpoints
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = WavLMConfigOrig(checkpoint["cfg"] )
SCREAMING_SNAKE_CASE = WavLMOrig(UpperCAmelCase__ )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
SCREAMING_SNAKE_CASE = WavLMConfig.from_pretrained(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = WavLMConfig()
SCREAMING_SNAKE_CASE = WavLMModel(UpperCAmelCase__ )
recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ )
hf_wavlm.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
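    # Standalone sketch of the "*" substitution performed via MAPPING above
    # (key names assumed for illustration): the fairseq layer index is spliced
    # into the HF key template.
    _name = "encoder.layers.3.self_attn.k_proj.weight"
    _layer = _name.split("self_attn.k_proj")[0].split(".")[-2]  # -> "3"
    assert "encoder.layers.*.attention.k_proj".replace("*", _layer) == "encoder.layers.3.attention.k_proj"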
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    # for printing the graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding an edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
_lowerCamelCase : Tuple = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
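# For contrast, a minimal iterative DFS sketch with an explicit stack (an
# assumed standalone helper, not part of the Graph class above):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)
            # push neighbors in reverse so they are visited in insertion order
            stack.extend(reversed(graph.get(vertex, [])))
    return order


assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]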
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
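    # Numeric sketch of what pad_across_processes does above (assuming two
    # processes): rank 0's (2, 10) tensor is zero-padded along dim 0 to the
    # largest shape across ranks, here (3, 10).
    _demo = torch.nn.functional.pad(torch.ones(2, 10), (0, 0, 0, 1))
    assert _demo.shape == (3, 10) and torch.all(_demo[2] == 0)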
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowercase ( a ):
lowercase__ : Tuple = """efficientformer"""
def __init__( self : Union[str, Any] , _UpperCamelCase : List[int] = [3, 2, 6, 4] , _UpperCamelCase : List[int] = [48, 96, 224, 448] , _UpperCamelCase : List[bool] = [True, True, True, True] , _UpperCamelCase : int = 448 , _UpperCamelCase : int = 32 , _UpperCamelCase : int = 4 , _UpperCamelCase : int = 7 , _UpperCamelCase : int = 5 , _UpperCamelCase : int = 8 , _UpperCamelCase : int = 4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : int = 16 , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : int = 1 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : float = 1e-5 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : float = 1e-12 , _UpperCamelCase : int = 224 , _UpperCamelCase : float = 1e-05 , **_UpperCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
SCREAMING_SNAKE_CASE = downsamples
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = pool_size
SCREAMING_SNAKE_CASE = downsample_patch_size
SCREAMING_SNAKE_CASE = downsample_stride
SCREAMING_SNAKE_CASE = downsample_pad
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = num_meta3d_blocks
SCREAMING_SNAKE_CASE = distillation
SCREAMING_SNAKE_CASE = use_layer_scale
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = batch_norm_eps
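# Minimal usage sketch (assuming this config class is exported as
# EfficientFormerConfig, as in transformers): the defaults mirror the
# signature above.
# from transformers import EfficientFormerConfig
# config = EfficientFormerConfig()
# assert config.hidden_sizes == [48, 96, 224, 448] and config.depths == [3, 2, 6, 4]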
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Tuple = (KDPMaDiscreteScheduler,)
lowercase__ : Optional[int] = 10
def __snake_case( self : Optional[Any] , **_UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if str(_UpperCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase ):
@property
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __snake_case( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" , return_dict=_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase ( unittest.TestCase ):
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "google/ncsnpp-celebahq-256"
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=_UpperCamelCase , output_type="numpy" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
# Algorithm for pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is the difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
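    # Extra sanity check (a sketch): offsetting hole indices by min_val also
    # makes negative integers sort correctly, in O(n + range) time overall.
    nums = [3, -1, 2, -1, 0]
    pigeonhole_sort(nums)
    assert nums == [-1, -1, 0, 2, 3]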
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
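# A minimal sketch of the lazy-import pattern used above (hedged: a simplified
# illustration, not the actual transformers._LazyModule implementation).
# Attribute access imports the defining submodule only on first use, so merely
# importing the package stays cheap when optional backends are absent.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)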
| 647
| 0
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
_lowerCamelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
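# Example invocation (hedged: the script name and file paths are illustrative
# placeholders, not checked-in artifacts):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_model.ckpt \
#       --big_bird_config_file big_bird_config.json \
#       --pytorch_dump_path ./bigbird_pytorch \
#       --is_trivia_qa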
| 708
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self) -> str:
        return f"{self.data}"
class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self) -> str:
        return "->".join([str(item) for item in self])
    def __len__(self) -> int:
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
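# A short usage sketch (hedged: added for illustration; it is exercised by the
# testmod() call below):
def _stack_demo() -> None:
    """
    >>> stack = LinkedStack()
    >>> stack.is_empty()
    True
    >>> stack.push(5)
    >>> stack.push(9)
    >>> print(stack)
    9->5
    >>> stack.peek()
    9
    >>> stack.pop()
    9
    >>> len(stack)
    1
    """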
if __name__ == "__main__":
from doctest import testmod
testmod()
| 647
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
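# Usage note (hedged sketch): MT5 has no tokenizer of its own, so the
# extra_objects mapping above re-exports the T5 tokenizers under MT5 names.
# Assuming that aliasing, the following holds:
#
#   from transformers import MT5Tokenizer, T5Tokenizer
#   assert MT5Tokenizer is T5Tokenizer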
| 709
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce TensorFlow console noise
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647
| 0
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
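# A minimal usage sketch (hedged: the feature spec and output path below are
# illustrative placeholders, not part of the original benchmark):
if __name__ == "__main__":
    example_features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
    example_dataset = generate_example_dataset("/tmp/dummy_benchmark.arrow", example_features, num_examples=10)
    print(example_dataset)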
| 710
|
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
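# A quick sanity check (hedged: expected values worked out by hand):
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))  # 0
    print(max_product_subarray([-2, -3, 4]))  # 24, from the subarray [-2, -3, 4]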
| 647
| 0
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCamelCase : Union[str, Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
image=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 711
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}")
    return _default_log_level
def _get_library_name():
    return __name__.split(".")[0]
def _get_library_root_logger():
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
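# A short usage sketch of the verbosity helpers above (hedged: illustrative
# only; get_logger() returns the library root logger configured above):
if __name__ == "__main__":
    set_verbosity_info()
    logger = get_logger()
    logger.info("effective verbosity: %s", get_verbosity())  # 20 == logging.INFO
    set_verbosity_error()  # silence info/warning output again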
| 647
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCamelCase : Union[str, Any] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowercase ( unittest.TestCase ):
lowercase__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase__ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase__ : List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase__ : List[str] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __snake_case( self : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ZeroShotClassificationPipeline(
model=_UpperCamelCase , tokenizer=_UpperCamelCase , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __snake_case( self : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
# No kwarg
SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
SCREAMING_SNAKE_CASE = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
SCREAMING_SNAKE_CASE = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
_UpperCamelCase , [
{"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
for i in range(1 )
] , )
SCREAMING_SNAKE_CASE = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
_UpperCamelCase , [
{"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_UpperCamelCase ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(_UpperCamelCase ):
classifier(_UpperCamelCase , candidate_labels="politics" )
with self.assertRaises(_UpperCamelCase ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(_UpperCamelCase ):
classifier("Who are you voting for in 2020?" , candidate_labels=_UpperCamelCase )
with self.assertRaises(_UpperCamelCase ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(_UpperCamelCase ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=_UpperCamelCase , )
self.run_entailment_id(_UpperCamelCase )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def __snake_case( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def __snake_case( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
SCREAMING_SNAKE_CASE = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
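# A minimal usage sketch of the pipeline exercised above (hedged: the candidate
# labels are illustrative, and the final comment describes behavior asserted by
# the tests rather than fixed scores):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "economics"])
#   # result["labels"] is sorted by descending score and result["scores"] sums to 1.0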
| 712
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , """Tatoeba directory does not exist.""" )
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCamelCase )
@slow
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.resolver.write_model_card("opus-mt-he-en" , dry_run=_UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
| 647
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
SCREAMING_SNAKE_CASE = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(_UpperCamelCase )
from datasets import load_dataset
SCREAMING_SNAKE_CASE = load_dataset("nielsr/rvlcdip-demo" )
SCREAMING_SNAKE_CASE = dataset["train"][0]["image"].convert("RGB" )
SCREAMING_SNAKE_CASE = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.Size((1, 16) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=_UpperCamelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 713
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
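# A minimal inference sketch mirroring the accelerate test above (hedged:
# abbreviated flow with the same public checkpoint):
#
#   processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#   model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   predicted_class_idx = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class_idx])  # e.g. "tabby, tabby cat"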
| 647
| 0
|