"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # The batch is padded to the length of the longest sequence (105 tokens); the
        # padding runs are written as `[0] * n` / `[1] * n` for readability.
        expected_encoding = {
            "input_ids": [
                [11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2],
                [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2] + [0] * 68,
                [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2] + [0] * 91,
            ],
            "attention_mask": [
                [1] * 105,
                [1] * 37 + [0] * 68,
                [1] * 14 + [0] * 91,
            ],
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
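# Usage sketch (added for illustration; the relative test imports above mean this
# module is normally run through pytest rather than executed directly):
#
#     >>> tok = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
#     >>> tok.tokenize("This is a test")
#     ['▁This', '▁is', '▁a', '▁t', 'est']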
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a UNet denoiser and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
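# Usage sketch (added for illustration): running the pipeline above end to end.
# "harmonai/maestro-150k" is one public DanceDiffusion-style checkpoint; any
# UNet1D + scheduler pair saved with save_pretrained should work. This needs a
# network connection and benefits from a GPU:
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#     audio = output.audios[0]  # numpy array of shape (channels, sample_size)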
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""

        # queries, keys and values. NOTE: the assignment targets below did not survive
        # in this copy; they are restored to match the upstream BEiT/DiT conversion script.
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the HuggingFace BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
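# Example invocation (added for illustration; assumes this script is saved as
# convert_dit_to_pytorch.py, and the dump folder is a placeholder):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base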
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Dict = 0
snake_case_ : Optional[int] = number
while duplicate > 0:
snake_case_ , snake_case_ : Any = divmod(SCREAMING_SNAKE_CASE__ , 1_0 )
fact_sum += factorial(SCREAMING_SNAKE_CASE__ )
return fact_sum == number
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
a_ = int(input('''Enter number: ''').strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
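# Worked example (added for illustration): 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, whereas 23 is not (2! + 3! = 2 + 6 = 8).
#
#     >>> krishnamurthy(145)
#     True
#     >>> krishnamurthy(23)
#     False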
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer, which operates directly on raw UTF-8 bytes."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # a byte vocabulary has no file to save
        return ()
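# Usage sketch (added for illustration): a byte-level round trip. Every UTF-8 byte is
# one token, offset by the three special tokens (pad=0, eos=1, unk=2), so "é" (two
# bytes) contributes two tokens:
#
#     >>> tok = ByT5Tokenizer()
#     >>> tokens = tok._tokenize("héllo")
#     >>> len(tokens)
#     6
#     >>> tok.convert_tokens_to_string(tokens)
#     'héllo'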
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
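# Design note (added): this layer follows the adaptive softmax of Grave et al. 2017
# ("Efficient softmax approximation for GPUs"). The `shortlist_size` most frequent
# tokens keep full-rank logits in the head, while each tail cluster i is scored
# through a smaller d_embed // div_val**i projection, so the head pays most of the
# compute and the long vocabulary tail stays cheap.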
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether ``fs`` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's internal event-loop state so it can be re-created after forking."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
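# Usage sketch (added for illustration):
#
#     >>> extract_path_from_uri("s3://my-bucket/datasets/squad")
#     'my-bucket/datasets/squad'
#     >>> extract_path_from_uri("/local/datasets/squad")
#     '/local/datasets/squad'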
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase : Dict ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase : str =importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase : Optional[int] =spec.loader.load_module()
UpperCAmelCase : str =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase : Optional[int] =re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
UpperCAmelCase : Optional[Any] ={
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def _lowerCAmelCase ():
UpperCamelCase_ = []
for config_class in list(CONFIG_MAPPING.values()):
UpperCamelCase_ = False
# source code of `config_class`
UpperCamelCase_ = inspect.getsource(_lowerCAmelCase)
UpperCamelCase_ = _re_checkpoint.findall(_lowerCAmelCase)
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
UpperCamelCase_ , UpperCamelCase_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase_ = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCamelCase_ = True
break
UpperCamelCase_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCAmelCase)
if len(_lowerCAmelCase) > 0:
UpperCamelCase_ = "\n".join(sorted(_lowerCAmelCase))
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
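# Example run (added for illustration): from the repository root,
#
#     python utils/check_config_docstrings.py
#
# exits quietly when every config class docstring links its checkpoint correctly,
# and raises ValueError listing the offending classes otherwise.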
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
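# A few more shapes the pattern accepts and rejects (added for illustration).
if __name__ == "__main__":
    assert indian_phone_validator("9876543210")      # bare 10-digit mobile number
    assert indian_phone_validator("+91 9876543210")  # +91 prefix with a space
    assert not indian_phone_validator("123456789")   # bad leading digit and too short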
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
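# Usage sketch (added for illustration): the derived properties above in action.
#
#     >>> cfg = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
#     >>> cfg.head_dim        # 4544 // 71
#     64
#     >>> cfg.rotary          # rotary embeddings are used whenever alibi is off
#     True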
def UpperCamelCase__( UpperCamelCase__ : str = "The quick brown fox jumps over the lazy dog" , )->bool:
A__ = set()
# Replace all the whitespace in our sentence
A__ = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCamelCase__ ) == 26
def UpperCamelCase__( UpperCamelCase__ : str = "The quick brown fox jumps over the lazy dog" , )->bool:
A__ = [False] * 26
for char in input_str:
if char.islower():
A__ = True
elif char.isupper():
A__ = True
return all(UpperCamelCase__ )
def UpperCamelCase__( UpperCamelCase__ : str = "The quick brown fox jumps over the lazy dog" , )->bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def UpperCamelCase__( )->None:
from timeit import timeit
A__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=UpperCamelCase__ ) )
print(timeit('''is_pangram_faster()''' , setup=UpperCamelCase__ ) )
print(timeit('''is_pangram_fastest()''' , setup=UpperCamelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
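# Quick sanity checks (added for illustration), run after the benchmark above.
if __name__ == "__main__":
    assert is_pangram("Waltz, bad nymph, for quick jigs vex.")
    assert is_pangram_faster("Sphinx of black quartz, judge my vow")
    assert not is_pangram_fastest("hello world")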
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 472
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
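# Worked example (hypothetical key, not taken from a real checkpoint): with offset 0,
#   replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.2.0.output.conv1.weight".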
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak model's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
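# Example invocation (illustrative only; the script filename and checkpoint path are
# assumptions and must be adapted to a locally downloaded PoolFormer checkpoint):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf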
| 472
| 1
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
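# Example invocation (illustrative only; `results.csv` is a hypothetical file produced by
# the transformers benchmarking scripts with columns model, batch_size, sequence_length, result):
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png --is_time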
| 175
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias

        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
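# Example invocation (illustrative only; the checkpoint path is a hypothetical location
# of a downloaded fairseq X-MOD checkpoint):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-hf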
| 175
| 1
|
'''simple docstring'''
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    Examples:
    First permutation is always 0, 1, 2, ..., n - 1:
    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]

    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """
        Decodes a text or batch of texts from token ids using the raw SP tokenizer. This has reduced functionality
        but is often much faster.
        """
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
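# Minimal usage sketch (assumes the AI-Sweden checkpoints above are reachable; `encode_fast`
# skips most of the slow-tokenizer machinery and returns raw SentencePiece ids):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Träd är fina")
#   text = tokenizer.decode_fast(ids)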
| 514
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 337
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
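# Minimal usage sketch (assumes `unet` and `scheduler` come from a pretrained unconditional
# DDPM/DDIM checkpoint; the pipeline noises the input image up to `strength` and denoises it):
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, noising_timestep = pipe(image=pil_image, strength=0.5, return_dict=False)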
| 337
| 1
|
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
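# Non-interactive usage example:
#
#   data = [5, 1, 4, 2, 3]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 2, 3, 4, 5]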
| 528
|
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
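# Sanity checks from the Project Euler 191 statement: over a 4-day period there are
# exactly 43 prize strings, so solution(4) == 43; the 30-day answer is 1918080160.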
| 528
| 1
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n]
        """
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Show frequency response of a filter.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Show phase response of a filter.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
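# Minimal usage sketch with a trivial pass-through "filter" that satisfies the FilterType
# protocol (a real IIR filter from this package would normally be used; a pass-through
# impulse response plots as a flat 0 dB line):
#
#   class PassThrough:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(PassThrough(), 48000)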
| 188
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
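# Minimal usage sketch (the classic filelock example; paths are illustrative):
#
#   lock = FileLock("high_ground.txt.lock")
#   with lock:
#       with open("high_ground.txt", "a") as f:
#           f.write("You were the chosen one.")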
| 188
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
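# Standalone inference sketch mirroring the integration test above. The
# "facebook/regnet-y-040" checkpoint name is an assumption (it is the usual
# first entry of REGNET_PRETRAINED_MODEL_ARCHIVE_LIST); everything else reuses
# names defined in this file.
def _regnet_inference_demo():
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = prepare_img()
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    # map the top class index back to a human-readable label
    return model.config.id2label[int(logits.argmax(-1))]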
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
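# Worked example of the sequence-length bookkeeping above, with the tester's
# defaults: a 30x30 image with 2x2 patches gives (30 // 2) * (30 // 2) = 225
# patches, so expected_seq_len = 225 + 1 ([CLS] token) + 10 (detection tokens) = 236.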
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
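# The core update above is the standard LoRA merge: for each adapted layer,
#     W' = W0 + alpha * (up @ down)
# A minimal numeric sketch of that update (all shapes below are invented
# purely for illustration):
def _lora_merge_demo():
    weight = torch.zeros(4, 4)
    up, down = torch.randn(4, 2), torch.randn(2, 4)  # rank-2 factors
    alpha = 0.75
    weight += alpha * torch.mm(up, down)  # same formula used in convert()
    return weight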
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
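# Illustrative sketch (not part of the original algorithm): a single "on"
# pixel grows into a plus shape under a cross-shaped structuring element.
def _dilation_demo():
    img = np.zeros((5, 5))
    img[2, 2] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    return dilation(img, cross)  # 1s at (2, 2) and its four direct neighbours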
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' ,['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' ,['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' ,[None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
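# One concrete expansion of the parametrized cases above, for reference:
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename.csv", revision=None)
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename.csv"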
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
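# Expected output of the print above (the two inputs merged into one
# ascending list):
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10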
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_A = TypeVar("""KEY""")
_A = TypeVar("""VAL""")
@dataclass(frozen=_snake_case , slots=_snake_case )
class lowerCamelCase ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
class lowerCamelCase ( _Item ):
'''simple docstring'''
def __init__(self ):
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __bool__(self ):
"""simple docstring"""
return False
_A = _DeletedItem()
class lowerCamelCase ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self , _lowerCamelCase = 8 , _lowerCamelCase = 0.75 ):
"""simple docstring"""
UpperCAmelCase__ : str = initial_block_size
UpperCAmelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCAmelCase__ : Union[str, Any] = capacity_factor
UpperCAmelCase__ : Union[str, Any] = 0
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return hash(_UpperCamelCase ) % len(self._buckets )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self._buckets[ind]
if not stored:
UpperCAmelCase__ : int = _Item(_UpperCamelCase , _UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase__ : int = _Item(_UpperCamelCase , _UpperCamelCase )
return True
else:
return False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCamelCase )
def _a (self ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase__ : str = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self._buckets
UpperCAmelCase__ : Optional[Any] = [None] * new_size
UpperCAmelCase__ : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _a (self ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def _a (self ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self._get_bucket_index(_UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase__ : str = self._get_next_ind(_UpperCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
if self._try_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
break
def __setitem__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(_UpperCamelCase , _UpperCamelCase )
def __delitem__(self , _lowerCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
UpperCAmelCase__ : str = self._buckets[ind]
if item is None:
raise KeyError(_UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase__ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self , _lowerCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCamelCase )
def __len__(self ):
"""simple docstring"""
return self._len
def __iter__(self ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ' ,'.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
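# Minimal usage sketch (illustrative only): exercises insert, overwrite and
# delete, and indirectly the open-addressing probe sequence above.
def _hash_map_demo():
    hm: HashMap[str, int] = HashMap(initial_block_size=8)
    hm["one"] = 1
    hm["two"] = 2
    hm["one"] = 11  # overwriting an existing key keeps len() at 2
    del hm["two"]
    return len(hm), hm["one"]  # (1, 11)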
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
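# With the _LazyModule pattern above, nothing heavy is imported at package-import
# time; only accessing a symbol (e.g. `from transformers.models.timesformer import
# TimesformerModel`) triggers the actual import of modeling_timesformer, and
# therefore of torch.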
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
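# A short usage sketch (assumption, not part of the test file): outside of tests the
# processor is typically loaded straight from the Hub; the checkpoint name is illustrative.
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text="一件T-shirt", images=image, return_tensors="pt")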
| 662
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any = ["""PoolFormerFeatureExtractor"""]
_A : Dict = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 100
|
"""
Project Euler problem 493 (https://projecteuler.net/problem=493):
70 coloured balls are placed in an urn, 10 for each of the 7 rainbow colours.
What is the expected number of distinct colours in 20 randomly picked balls?
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Expected number of distinct colours among `num_picks` balls drawn without replacement."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
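# Why this works (a brief note, not in the original file): by linearity of expectation,
# E[#colours] = NUM_COLOURS * P(a fixed colour appears at least once), and
# P(colour missing) = C(60, 20) / C(70, 20), so the answer is 7 * (1 - C(60,20)/C(70,20)),
# which evaluates to roughly 6.818741802.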
| 667
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """Legacy __init__ that maps deprecated `no_*` kwargs onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
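# A minimal behavioural sketch (assumption, not part of the original file) of the
# deprecated-argument remapping in __init__ above: a legacy `no_*` flag is popped and
# its negation is stored on the positive attribute before the parent __init__ runs.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   assert args.cuda is False  # no_cuda=True was flipped into cuda=False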
| 719
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Computes WER/CER on the decoded predictions and writes the results to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalizes a target transcription: lowercase, strip punctuation and extra whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
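# Example invocation (illustrative model/dataset ids, assumption — adapt to your setup):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs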
| 338
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
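# A quick interactive sketch (not part of the test suite) of the API under test:
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ["train", "validation"]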
| 50
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , snake_case_ , snake_case_="<pad>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<mask_2>" , snake_case_="<mask_1>" , snake_case_=None , snake_case_=1_03 , snake_case_ = None , **snake_case_ , ):
lowercase =offset
if additional_special_tokens is not None:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError(
f'additional_special_tokens should be of type {type(snake_case_ )}, but is'
f' {type(snake_case_ )}' )
lowercase =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(snake_case_ ) , self.offset - 1 )
]
if len(set(snake_case_ ) ) != len(snake_case_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowercase =additional_special_tokens_extended
else:
lowercase =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , mask_token=snake_case_ , pad_token=snake_case_ , mask_token_sent=snake_case_ , offset=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
lowercase =mask_token_sent
lowercase =vocab_file
lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
# add special tokens to encoder dict
lowercase ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase ={v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id, honouring the special-token offset."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str), honouring the special-token offset."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Gets a list where entries are [1] if a token is special, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
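# A small sketch of the id layout this tokenizer produces (illustrative, assuming the
# default offset=103): ids 0/1 are <pad>/</s>, ids 2/3 are <mask_1>/<mask_2>, ids 4..102
# hold the <unk_2>..<unk_102> placeholders, and every SentencePiece id is shifted up by 103.
#
#   tok = PegasusTokenizer("spiece.model")  # the path is illustrative
#   sp_id = tok.sp_model.piece_to_id("▁the")
#   assert tok._convert_token_to_id("▁the") == sp_id + tok.offset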
| 72
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
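# A short usage sketch (assumption, not part of the file; the checkpoint is illustrative):
#   from transformers import Blip2Processor
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")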
| 705
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
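# A worked example of get_expected_values (illustrative, assuming the defaults above):
# for a (3, 30, 400) input, size = 288 gives scale = 288 / 30 = 9.6, so (newh, neww) =
# (288, 3840); the cap max_size = int(1333 / 800 * 288) = 479 rescales this to
# (36, 479), and rounding down to multiples of size_divisor = 32 yields (32, 448).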
| 26
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method, containing the latent codes."""

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
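# A minimal round-trip sketch (assumption, not part of the file):
#   import torch
#   model = VQModel()
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # encode -> quantize -> decode
#   assert reconstruction.shape == sample.shape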
| 93
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
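# A short behavioural sketch of add_prefix_space (assumption, not part of the file):
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   tok("Hello")["input_ids"]   # [15496] -> "Hello" with no leading space
#   tok(" Hello")["input_ids"]  # [18435] -> " Hello" as a single space-prefixed token
# With add_prefix_space=True the pre-tokenizer inserts that leading space automatically,
# which is required for pretokenized (is_split_into_words=True) inputs.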
| 145
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generates binary segmentation masks for the given image."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 719
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
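# Quick sanity check (illustrative numbers): at sample_rate=16000 and max_length=20 seconds,
# sample_length = 320000 frames; a 10-minute clip (9,600,000 frames) is cut to a random
# 320000-frame window, while anything already shorter is returned unchanged.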
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__A = feature_extractor.model_input_names[0]
def train_transforms(snake_case ):
__A = []
for audio in batch[data_args.audio_column_name]:
__A = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case )
__A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__A = {model_input_name: inputs.get(snake_case )}
__A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case ):
__A = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
__A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
__A = {model_input_name: inputs.get(snake_case )}
__A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__A = raw_datasets['''train'''].features[data_args.label_column_name].names
__A , __A = {}, {}
for i, label in enumerate(snake_case ):
__A = str(snake_case )
__A = label
# Load the accuracy metric from the datasets package
__A = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case ):
__A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case , references=eval_pred.label_ids )
__A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , labelaid=snake_case , idalabel=snake_case , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__A = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__A = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case )
# Initialize our trainer
__A = Trainer(
model=snake_case , args=snake_case , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , )
# Training
if training_args.do_train:
__A = None
if training_args.resume_from_checkpoint is not None:
__A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__A = last_checkpoint
__A = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__A = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case )
trainer.save_metrics('''eval''' , snake_case )
# Write model card and (optionally) push to hub
__A = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
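
# A minimal sketch (added, not part of the original script) of what the `compute_metrics`
# callback above does with an `EvalPrediction`: argmax over logits, then the `evaluate`
# accuracy metric. The logits and labels below are toy numbers.
import numpy as np
import evaluate

logits = np.array([[2.0, 0.1, 0.3], [0.2, 1.5, 0.1], [0.3, 0.2, 2.2], [1.0, 0.9, 0.8]])
references = np.array([0, 1, 2, 1])
accuracy = evaluate.load("accuracy")
predictions = np.argmax(logits, axis=1)  # same reduction the script's compute_metrics applies
print(accuracy.compute(predictions=predictions, references=references))  # {'accuracy': 0.75}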
"""simple docstring"""
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
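
# A quick usage check (added) for the helpers above: graph 0 contains three bridges,
# while the biconnected graph at index 3 has none. Output order follows DFS discovery.
print(compute_bridges(get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)]
print(compute_bridges(get_demo_graph(3)))  # []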
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
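
# Illustration (added, not from the test file) of why test_full_tokenizer expects
# ids [7, 4, 5, 10, 8, 9]: WordPiece ids are simply line indices in the toy vocab
# written by setUp above.
vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
token_to_id = {tok: i for i, tok in enumerate(vocab)}
print([token_to_id[t] for t in ["un", "##want", "##ed", ",", "runn", "##ing"]])  # [7, 4, 5, 10, 8, 9]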
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class _lowerCAmelCase( _a):
"""simple docstring"""
lowerCamelCase__ = '''AutoTokenizer'''
lowerCamelCase__ = ['''tokenizer''']
lowerCamelCase__ = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None )-> Tuple:
super().__init__(UpperCAmelCase )
__A = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase )-> str:
if speaker_embeddings_dict_path is not None:
__A = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase ) , revision=kwargs.pop('''revision''' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
__A = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
__A = json.load(UpperCAmelCase )
else:
__A = None
__A = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , )-> Dict:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , '''v2''' ) , exist_ok=UpperCAmelCase )
__A = {}
__A = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__A = self._load_voice_preset(UpperCAmelCase )
__A = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , UpperCAmelCase , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
__A = os.path.join(UpperCAmelCase , f"{prompt_key}_{key}.npy" )
__A = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , '''w''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase = None , **UpperCAmelCase )-> Tuple:
__A = self.speaker_embeddings[voice_preset]
__A = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
__A = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase ) , revision=kwargs.pop('''revision''' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
__A = np.load(UpperCAmelCase )
return voice_preset_dict
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase = None )-> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=2_56 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , )-> int:
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__A = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('''.npz''' ):
__A = voice_preset + '''.npz'''
__A = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
__A = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
__A = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
__A = voice_preset
return encoded_text
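
# Hedged sketch (added) of the shape contract the processor's preset validation above
# enforces via `preset_shape`. The array sizes here are placeholders; real prompt
# lengths depend on the voice preset.
import numpy as np

preset_shape = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}
voice_preset = {
    "semantic_prompt": np.zeros(128),      # must be a 1-D ndarray
    "coarse_prompt": np.zeros((2, 128)),   # must be a 2-D ndarray
    "fine_prompt": np.zeros((8, 128)),     # must be a 2-D ndarray
}
for key, expected_ndim in preset_shape.items():
    assert voice_preset[key].ndim == expected_ndim, f"{key} voice preset must be a {expected_ndim}D ndarray"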
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase( _a , unittest.TestCase):
"""simple docstring"""
lowerCamelCase__ = GPTSanJapaneseTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = {'''do_clean_text''': False, '''add_prefix_space''': False}
def SCREAMING_SNAKE_CASE__ ( self )-> List[Any]:
super().setUp()
# fmt: off
__A = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
__A = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
__A = {'''unk_token''': '''<unk>'''}
__A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , **UpperCAmelCase )-> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase )-> Union[str, Any]:
__A = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
__A = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase )-> Optional[int]:
__A , __A = self.get_input_output_texts(UpperCAmelCase )
__A = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
__A = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self )-> Tuple:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self )-> str:
__A = self.get_tokenizer()
# Testing tokenization
__A = '''こんにちは、世界。 こんばんは、㔺界。'''
__A = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
__A = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids without special tokens
__A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__A = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids with special tokens
__A = tokens + [tokenizer.unk_token]
__A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__A = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]:
__A = self.get_tokenizer()
# Testing tokenization
__A = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
__A = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
__A = tokenizer.encode(UpperCAmelCase )
__A = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> Dict:
__A = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__A = '''こんにちは、世界。'''
__A = '''こんばんは、㔺界。😀'''
__A = '''こんにちは、世界。こんばんは、世界。😀'''
__A = tokenizer.encode(prefix_text + input_text )
__A = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
__A = tokenizer.encode(UpperCAmelCase , prefix_text=UpperCAmelCase )
__A = tokenizer.decode(UpperCAmelCase )
__A = tokenizer.decode(UpperCAmelCase )
__A = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
__A = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__A = '''こんにちは、世界。'''
__A = '''こんばんは、㔺界。😀'''
__A = len(tokenizer.encode(UpperCAmelCase ) ) - 2
__A = len(tokenizer.encode(UpperCAmelCase ) ) - 2
__A = [1] + [0] * (len_prefix + len_text + 1)
__A = [1] * (len_prefix + len_text + 1) + [0]
__A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__A = tokenizer(prefix_text + input_text ).token_type_ids
__A = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
__A = tokenizer(UpperCAmelCase , prefix_text=UpperCAmelCase ).token_type_ids
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
__A = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__A = tokenizer.encode('''あンいワ''' )
__A = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
__A = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
__A = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__A = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
__A = tokenizer(UpperCAmelCase , padding=UpperCAmelCase )
__A = tokenizer.batch_encode_plus(UpperCAmelCase , padding=UpperCAmelCase )
# fmt: off
__A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
__A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self )-> int:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def SCREAMING_SNAKE_CASE__ ( self )-> List[str]:
# tokenizer has no padding token
pass
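
# Added illustration of the prefix-LM token_type layouts the tests above assert, using
# made-up lengths (len_prefix=3, len_text=4) rather than real tokenizer output:
len_prefix, len_text = 3, 4
no_prefix = [1] + [0] * (len_prefix + len_text + 1)            # only the start token counts as context
all_prefix = [1] * (len_prefix + len_text + 1) + [0]           # the whole input counts as context
split_prefix = [1] + [1] * len_prefix + [0] * (len_text + 1)   # prefix is context, text is generated
print(no_prefix, all_prefix, split_prefix, sep="\n")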
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
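
# Simplified sketch (added; not the actual transformers internals) of the lazy-import
# pattern `_LazyModule` implements: a submodule is only imported when one of its
# exported names is first accessed, so importing the package itself stays cheap.
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")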
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23]
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31]
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
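
# Added check of the hypernetwork-MLP renaming rule implemented in replace_keys,
# run on a fabricated key (not taken from a real checkpoint):
import re

pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
key = "mask_decoder.output_hypernetworks_mlps.3.layers.2.weight"
layer_nb = int(re.match(pattern, key).group(2))
if layer_nb == 2:
    key = key.replace("layers.2", "proj_out")
print(key)  # mask_decoder.output_hypernetworks_mlps.3.proj_out.weight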
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCAmelCase_ = StableUnCLIPPipeline
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase_ = False
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = 32
UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , )
torch.manual_seed(0 )
UpperCamelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ )
UpperCamelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCamelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL()
UpperCamelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def snake_case_ (self , __a , __a=0 ) -> int:
if str(UpperCamelCase_ ).startswith("mps" ):
UpperCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ )
def snake_case_ (self ) -> Dict:
UpperCamelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
UpperCamelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase = pipe("anime turle" , generator=UpperCamelCase_ , output_type="np" )
UpperCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
def snake_case_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
UpperCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """Replace the expected-value line of one test in `file` with `correct_line`."""
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
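
# The corrections file consumed by main() is semicolon-separated, one fix per line:
#   <test file>;<test class>;<test name>;<replacement source line>
# A tiny parse check with a fabricated entry (not a real test path):
sample = "tests/test_x.py;XModelTest;test_slice;expected = [1.0, 2.0, 3.0]\n"
file, class_name, test_name, correct_line = sample.split(";")
print(file, class_name, test_name, correct_line.strip(), sep=" | ")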
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration class for UMT5 models."""

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self, vocab_size=250_112, d_model=512, d_kv=64, d_ff=1_024, num_layers=8, num_decoder_layers=None,
        num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True,
        use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1,
        decoder_start_token_id=0, **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
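
# Small sanity check (added) of the activation parsing in UMT5Config above: the default
# feed_forward_proj="gated-gelu" is split into a gated flag plus the "gelu_new" activation.
config = UMT5Config()
print(config.is_gated_act, config.dense_act_fn)        # True gelu_new
print(config.hidden_size, config.num_attention_heads)  # 512 6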
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["OwlViTFeatureExtractor"]
__UpperCamelCase : Union[str, Any] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
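

# A minimal, standalone sketch of the chunked-read pattern used above: pandas
# yields one DataFrame per `chunksize` rows and each chunk is converted into an
# Arrow table. The in-memory CSV payload below is hypothetical, chosen only to
# make the example self-contained.
if __name__ == "__main__":
    import io

    csv_payload = io.StringIO("a,b\n1,x\n2,y\n3,z\n")
    reader = pd.read_csv(csv_payload, iterator=True, chunksize=2)
    for batch_idx, df in enumerate(reader):
        batch_table = pa.Table.from_pandas(df)
        print(batch_idx, batch_table.num_rows, batch_table.column_names)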
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers by scattering values into per-offset buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
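
# Quick illustration of why each bucket is sorted at the end: values sharing
# the same integer offset from the minimum (here 2.1 and 2.9) land in one
# bucket, and the final per-bucket sort orders them. Sample values are
# illustrative only.
if __name__ == "__main__":
    print(bucket_sort([2.9, 0.5, 2.1, 1.7]))  # [0.5, 1.7, 2.1, 2.9]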
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
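
# The lazy-module pattern above defers heavy imports (e.g. torch) until an
# attribute is actually accessed. A minimal sketch of the same idea,
# independent of transformers (module and attribute names are hypothetical):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)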
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
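
# The script expects a CSV with `model`, `batch_size`, `sequence_length` and
# `result` columns, exactly the keys read in Plot.__init__ above. A
# hypothetical input file and invocation (names and values are illustrative):
#
#     model,batch_size,sequence_length,result
#     bert-base-uncased,8,128,512
#     bert-base-uncased,8,512,1490
#
#     python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png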
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
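
# A hedged usage sketch: the processor routes text to the tokenizer and images
# to the image processor, merging both into one batch. The checkpoint name and
# file path are illustrative; any compatible BLIP-2-style checkpoint would
# behave the same way.
#
#     from PIL import Image
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     image = Image.open("photo.jpg")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     print(inputs.keys())  # pixel_values plus the tokenizer outputs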
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50_000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)

        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
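

# A short NumPy sketch of what apply_rotary_position_embeddings computes: each
# (even, odd) feature pair is rotated by a position-dependent angle. This is an
# illustrative re-derivation of the rotary trick, not the transformers
# implementation itself.
if __name__ == "__main__":
    import numpy as np

    def rotate(x, sin, cos):
        # x: (..., seq_len, dim) with dim even; sin/cos: (seq_len, dim // 2)
        x1, x2 = x[..., 0::2], x[..., 1::2]
        rotated = np.empty_like(x)
        rotated[..., 0::2] = x1 * cos - x2 * sin
        rotated[..., 1::2] = x2 * cos + x1 * sin
        return rotated

    seq_len, dim = 4, 8
    pos = np.arange(seq_len)[:, None]
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = pos * inv_freq  # (seq_len, dim // 2)
    q = np.random.randn(seq_len, dim)
    print(rotate(q, np.sin(angles), np.cos(angles)).shape)  # (4, 8)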
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
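
# A hedged usage sketch: run generation on a background thread and consume the
# streamer as an iterator on the main thread. The checkpoint name is
# illustrative; any causal LM checkpoint should work the same way.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     inputs = tokenizer("An increasing sequence: one,", return_tensors="pt")
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")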
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
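
# A hedged usage sketch (the data directory path is illustrative): build the
# dataset from a GLUE-style data dir and feed it to a PyTorch DataLoader. The
# caching logic above means the second construction loads features from disk.
#
#     from torch.utils.data import DataLoader
#     from transformers import AutoTokenizer
#
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     loader = DataLoader(dataset, batch_size=32)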
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including `num` using the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
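
# The `p * p <= num` bound works because any composite n <= num has a prime
# factor no larger than sqrt(n), so all composites are crossed out by then.
# For example:
#     prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]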
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
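
# The byte-level mapping above is simple enough to demonstrate standalone:
# ids 0-2 are pad/eos/unk, so each UTF-8 byte b maps to id b + 3. A minimal
# sketch of the same encode/decode round trip, independent of transformers:
if __name__ == "__main__":
    num_special_tokens = 3
    text = "héllo"
    ids = [b + num_special_tokens for b in text.encode("utf-8")]
    print(ids)  # one id per UTF-8 byte, so "é" yields two ids
    decoded = bytes(i - num_special_tokens for i in ids).decode("utf-8")
    assert decoded == text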
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    """Bifid cipher: fractionates letters on a 5x5 Polybius square ("j" shares a cell with "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) position in the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        """Encipher `message` by splitting coordinates, regrouping them, and reading letters back."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message
    def decode(self, message: str) -> str:
        """Decipher `message` by reversing the fractionation steps of `encode`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
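
# Round-trip usage example: "j" folds into "i" and spaces are dropped by
# design of the 5x5 Polybius square, so decode returns the squashed message.
if __name__ == "__main__":
    cipher = BifidCipher()
    secret = cipher.encode("test message")
    print(secret)
    assert cipher.decode(secret) == "testmessage"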
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = ["ViTFeatureExtractor"]
lowercase__ : str = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
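    # The lazy module defers the heavy framework imports: names listed in
    # _import_structure are only materialized on first attribute access, so
    # e.g. `from .vit import ViTModel` triggers the torch import at that point.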
| 451
| 0
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups above."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check that the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
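    # A minimal exchange sketch using the reconstructed method names above:
    # both parties derive the same shared secret from each other's public key.
    alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared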
| 187
|
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps`,
    taking 1 or 2 steps at a time (the Fibonacci recurrence)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
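    # Worked example: three steps can be taken as 1+1+1, 1+2 or 2+1.
    assert climb_stairs(3) == 3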
| 680
| 0
|
'''simple docstring'''
def is_palindrome(head) -> bool:
    """Two pointers plus in-place reversal of the second half; O(1) extra space."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Push the second half onto a stack and compare; O(n) extra space."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Record the positions of each value and check they mirror around the middle."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
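# A minimal node type and smoke test (a sketch; the functions above only assume
# nodes exposing `val` and `next` attributes):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 3, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))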
| 718
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_ : Optional[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
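        expected_words = UpperCamelCase_  # bind the Tesseract word list assigned above to a readable name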
UpperCamelCase_ : List[Any] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
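        expected_boxes = UpperCamelCase_  # bind the Tesseract box list assigned above to a readable name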
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 543
| 0
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first walk of the state space tree; each leaf at depth
    len(sequence) is one permutation, printed on reaching it."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
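# The two driver calls above print all 24 orderings of [3, 1, 2, 4] and then
# the 6 orderings of ["A", "B", "C"], one permutation per line.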
| 6
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 156
| 0
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that stores the key (0 means "pick per call")."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` with `key`, returning the ciphertext as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt `content` (a list of chars) with `key`."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 703
|
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
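    # Worked example: 25 = 0b11001 and 32 = 0b100000 share no set bits.
    assert binary_and(25, 32) == "0b000000"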
| 231
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a hard mask before rendering it as an image
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
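# A usage sketch (hypothetical call pattern; PipelineTool instances are callable
# and chain encode -> forward -> decode, and "photo.png" is illustrative):
# tool = ImageSegmentationTool()
# mask = tool(image=Image.open("photo.png"), label="cat")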
| 232
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
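# A minimal sketch: the defaults above reproduce the base configuration, so
# (using the reconstructed class name) the following holds:
# config = ASTConfig()
# assert config.num_mel_bins == 128 and config.max_length == 1024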
| 232
| 1
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = "http://www.mocksite.com/file1.txt"
_a = "\"text\": [\"foo\", \"foo\"]"
_a = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __A :
'''simple docstring'''
lowerCAmelCase_ = 200
lowerCAmelCase_ = {"""Content-Length""": """100"""}
lowerCAmelCase_ = {}
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
return [bytes(__lowerCAmelCase , '''utf-8''' )]
def lowerCAmelCase__(*__snake_case ,**__snake_case ) -> Dict:
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 29
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
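# A minimal concrete subclass sketch (illustrative only, not part of the
# original module): it satisfies the abstract `read` contract by wrapping the
# stored path in a tiny in-memory Dataset.
class InMemoryDatasetReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        return Dataset.from_dict({"path": [str(self.path_or_paths)]})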
| 71
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value the player to move at `node_index` can force,
    alternating max and min levels down to the leaf scores."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
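    # Expected output for the hard-coded scores: "Optimal value : 65", since
    # max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
    # = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65.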
| 26
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy the timm weights over positionally, key by key
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
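    # Example invocation (a sketch; the script filename is illustrative and the
    # timm weights are downloaded on first use):
    #   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub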
| 717
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> str:
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_a : Any = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_a : Union[str, Any] = dataset.map(UpperCAmelCase , batched=UpperCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_a : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase )
_a : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_a : Optional[Any] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
_a : str = dataset.map(
partial(UpperCAmelCase , ctx_encoder=UpperCAmelCase , ctx_tokenizer=UpperCAmelCase ) , batched=UpperCAmelCase , batch_size=processing_args.batch_size , features=UpperCAmelCase , )
# And finally save your dataset
_a : List[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(UpperCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
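# Usage sketch (illustrative, not part of the script above): reload the passages and
# the Faiss index written by main() and retrieve passages for a question. The paths
# assume the default output_dir; the DPR question-encoder checkpoint is an assumption
# mirroring the context-encoder default, and the calls are shown commented out so the
# script's behaviour is unchanged.
#
#     from datasets import load_from_disk
#     from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
#     dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
#     dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")
#     q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output[0].detach().numpy()
#     scores, examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)  # k nearest passages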
| 307
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_351_563,
                        "num_examples": 10_000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238_418,
                        "num_examples": 1_000,
                    },
                ],
                download_size=3_940_680,
                dataset_size=2_589_981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
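# The +/-1% tolerance above can equivalently be expressed with pytest's built-in
# helper; this variant is illustrative only, not what the test itself uses.
def is_1percent_close_approx(source, target):
    return source == pytest.approx(target, rel=0.01)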
| 585
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
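# Note: _LazyModule defers the heavy torch/tf/flax imports until an attribute is
# first accessed. A simplified sketch of the same idea, using PEP 562's module-level
# __getattr__ instead of transformers' _LazyModule (names here are hypothetical):
#
#     import importlib
#
#     _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
#     def __getattr__(name):
#         if name in _attr_to_module:
#             module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")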
| 585
| 1
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 714
|
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert speed from ``unit_from`` to ``unit_to`` using the charts above."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
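# Illustrative conversions (values follow directly from the charts above; shown as
# comments so running the module is unchanged):
#     convert_speed(100, "km/h", "mph")  # 100 * 1.0 * 0.621371192 -> 62.137
#     convert_speed(10, "m/s", "km/h")   # 10 * 3.6 * 1.0 -> 36.0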
| 448
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 608
|
def optimal_merge_pattern(files: list) -> float:
    """Find the optimal merge cost for merging all the given sorted files together."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
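# The files.index(min(files)) scan above costs O(n) per merge, O(n^2) overall. The
# same optimal merge cost is usually computed with a min-heap in O(n log n); the
# sketch below is an alternative formulation, not the implementation used above.
import heapq


def optimal_merge_pattern_heap(files: list) -> int:
    """Optimal merge cost computed with a min-heap.

    >>> optimal_merge_pattern_heap([2, 3, 4])
    14
    """
    heap = list(files)
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)  # merge the two cheapest files
        optimal_merge_cost += merged
        heapq.heappush(heap, merged)
    return optimal_merge_cost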
| 443
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
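# The evaluation can also be driven programmatically rather than via the CLI; the
# paths and checkpoint below are hypothetical examples, and extra keyword arguments
# are forwarded to model.generate:
#
#     metrics = generate_summaries_or_translations(
#         ["Some long article text ..."],
#         "preds.txt",
#         "sshleifer/distilbart-cnn-12-6",
#         batch_size=4,
#         num_beams=2,
#     )
#     # -> {"n_obs": 1, "runtime": ..., "seconds_per_sample": ...}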
| 702
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py,
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 510
| 0
|
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
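# Worked example (expression chosen so operator associativity does not matter):
#     infix_2_postfix("a*b+c/d") returns "ab*cd/+"
#     infix_2_prefix("a*b+c/d")  returns "+*ab/cd"
# Each call also prints its symbol/stack/postfix trace table.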
| 57
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 464
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
return int((input_a, input_a).count(1 ) != 0 )
def lowercase_ ( ):
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 545
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback ):
    def on_sanity_check_start( self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are handled differently.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback ):
    # check whether newly added model parameters are differentiable
    def on_after_backward( self , trainer , pl_module ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback(pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        rank_zero_info('''***** Validation results *****''' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
    def on_test_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        rank_zero_info('''***** Test results *****''' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(output_test_results_file , '''w''' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ):
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''', default=str(Path(__file__ ).parent / '''test_run''' / '''model_checkpoints''' ), type=str, help='''The output directory where the model predictions and checkpoints will be written.''', )
    parser.add_argument(
        '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
    parser.add_argument(
        '''--fp16_opt_level''', type=str, default='''O2''', help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ), )
    parser.add_argument('''--n_tpu_cores''', dest='''tpu_cores''', type=int )
    parser.add_argument('''--max_grad_norm''', dest='''gradient_clip_val''', default=1.0, type=float, help='''Max gradient norm''' )
    parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''', action='''store_true''', help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''', dest='''accumulate_grad_batches''', type=int, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', )
    parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''', default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-train-data''' ), type=str, help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''', )
def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ):
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='''checkpoint''', monitor='''val_loss''', mode='''min''', save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['''precision'''] = 16
    if args.gpus > 1:
        train_params['''accelerator'''] = '''auto'''
        train_params['''strategy'''] = '''ddp'''
    train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches
    train_params['''profiler'''] = None
    train_params['''devices'''] = '''auto'''
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )
    if args.do_train:
        trainer.fit(model )
    else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
    return trainer
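# A minimal sketch (not part of the original script) of how the helpers above are
# typically wired together; `MyTransformer` is a hypothetical task-specific
# subclass of BaseTransformer that implements get_dataloader():
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args(["--model_name_or_path", "facebook/rag-token-base", "--do_train"])
#   model = MyTransformer(args)
#   trainer = generic_train(model, args)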
'''simple docstring'''
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
        return T5Config.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
        return T5Config(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
        return T5Config(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMT5Model(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMT5Model(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
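    # The check above exercises the KV-cache contract: feeding only the newly
    # sampled token together with past_key_values must reproduce, up to a small
    # numerical tolerance, the hidden state obtained by re-running the whole
    # concatenated sequence without a cache.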
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        model = UMT5Model(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMT5ModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        self.model_tester = UMT5ModelTester(self )
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
    def test_model_fp16_forward( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_headmasking( self ):
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
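    # Rationale for the strict 0.0 comparison above: when a head is fully
    # zero-masked, every attention weight it emits is exactly zero, so the sum
    # over the returned attention tensors needs no tolerance.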
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMT5IntegrationTest(unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test( self ):
        model = UMT5ForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=True )
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        ]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , do_convert_rgb : bool = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        return encoded_outputs
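# Note on the arithmetic above, using the OPENAI_CLIP statistics imported at the
# top of this file: per channel c the pipeline computes
#     out = (pixel * rescale_factor - image_mean[c]) / image_std[c]
# so with the default factor of 1/255, a red-channel value of 255 maps to
# (1.0 - 0.48145466) / 0.26862954 ≈ 1.93.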
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
    def setUp( self ):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm( self ):
        pass
    @slow
    def test_inference_no_head( self ):
        model = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num : int , base : int) -> str:
    if isinstance(num , float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base , str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base , float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
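# Worked example (not from the original file): decimal_to_any(255, 16) first gets
# divmod(255, 16) == (15, 15), appending "F"; num then becomes 15 and
# divmod(15, 16) == (0, 15) appends another "F" with div == 0, so the reversed
# accumulator "FF" is returned; int("FF", 16) == 255 is exactly the round trip
# that the self-test below asserts for every base in 2..36.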
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = '''roformer'''
    def __init__( self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
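# A minimal usage sketch (hypothetical values, not part of the original module):
#
#   config = RoFormerConfig(vocab_size=1_000, hidden_size=256, num_hidden_layers=4,
#                           num_attention_heads=4, intermediate_size=512)
#   onnx_config = RoFormerOnnxConfig(config)
#   print(onnx_config.inputs)   # OrderedDict mapping input names to dynamic axes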
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__( self , parent , ) -> Dict:
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFEsmModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig(datasets.BuilderConfig ):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__( self ):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ):
        """simple docstring"""
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
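# --- Added usage sketch (not part of the original module), never invoked here: any
# keyword matching a CsvConfig field above is forwarded to pandas.read_csv through
# pd_read_csv_kwargs. The file name is a hypothetical placeholder.
def _example_usage():
    from datasets import load_dataset

    dataset = load_dataset(
        "csv",
        data_files="my_table.csv",  # hypothetical local file
        sep=";",  # becomes CsvConfig.sep and is passed to pandas as `sep`
        skiprows=1,  # forwarded unchanged to pandas.read_csv
        split="train",
    )
    return dataset.column_names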
| 191
| 0
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
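# --- Added sketch (not part of the original script): the layer-selection idea above in
# isolation. Every other-ish teacher layer is copied into consecutively numbered student
# layers, so a 12-layer teacher yields a 6-layer student.
TEACHER_LAYERS = [0, 2, 4, 7, 9, 11]
LAYER_MAP = {teacher: student for student, teacher in enumerate(TEACHER_LAYERS)}
assert LAYER_MAP[7] == 3  # teacher layer 7 lands at student position 3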
| 114
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    # NOTE: the class name was anonymized in the dump; it is reconstructed here from the
    # pipeline's behavior (DDIM noise-comparative analysis of an input image).
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
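# --- Added usage sketch (not part of the original file), never invoked here. Assumes an
# unconditional 256x256 DDPM checkpoint such as "google/ddpm-celebahq-256"; treat the
# identifier and the wiring as illustrative rather than definitive.
def _example_usage():
    from diffusers import UNet2DModel
    from PIL import Image

    unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
    init_image = Image.open("face.png").convert("RGB")  # hypothetical input image
    images, noise_step = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)
    images[0].save("denoised.png")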
| 114
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
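# --- Added worked example (not part of the original tests): the shortest-edge resize
# rule implemented by get_expected_values above, as a standalone helper.
def shortest_edge_resize(w, h, shortest_edge):
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio;
    # returns (width, height).
    if w < h:
        return shortest_edge, int(shortest_edge * h / w)
    if w > h:
        return int(shortest_edge * w / h), shortest_edge
    return shortest_edge, shortest_edge


assert shortest_edge_resize(640, 480, 18) == (24, 18)  # 18 * 640 / 480 == 24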
| 297
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'''7B''': 11008,
'''13B''': 13824,
'''30B''': 17920,
'''65B''': 22016,
'''70B''': 28672,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
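# --- Added worked example (not part of the original script): for the 7B model dim is
# 4096, so int(8 * 4096 / 3) = 10922, which rounds up to the next multiple of 256,
# giving 11008 -- matching the "7B" entry in the intermediate-size table above.
assert compute_intermediate_size(4096) == 11008
assert compute_intermediate_size(8192) == 22016  # "65B"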
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
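# --- Added sketch (not part of the original script): why the concatenation axis differs
# per weight in write_model. Column-parallel shards (wq/wk/wv/w1/w3) stack along the
# output dimension (dim=0); row-parallel shards (wo/w2) stack along the input dimension
# (dim=1). Tiny random tensors stand in for real shards.
_shard_a, _shard_b = torch.randn(2, 4), torch.randn(2, 4)
assert torch.cat([_shard_a, _shard_b], dim=0).shape == (4, 4)  # column-parallel stitch
assert torch.cat([_shard_a, _shard_b], dim=1).shape == (2, 8)  # row-parallel stitch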
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders"
    )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"]
    )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer"
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 236
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
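# --- Added usage sketch (not part of the original file): a WTQ-style instantiation.
# Note how string keys (as produced by JSON serialization) are coerced back to ints by
# the loop at the end of __init__.
if __name__ == "__main__":
    config = TapasConfig(
        num_aggregation_labels=4,
        aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
        use_answer_as_supervision=True,
    )
    assert config.aggregation_labels[0] == "NONE"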
| 236
| 1
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"""Please use 3 unique rotors (not {unique_rotsel})""")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"""First rotor position is not within range of 1..26 ({rotorpos1})""")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"""Second rotor position is not within range of 1..26 ({rotorpos2})""")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"""Third rotor position is not within range of 1..26 ({rotorpos3})""")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"""Plugboard setting isn't type string ({type(pbstring)})""")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"""Odd number of symbols ({len(pbstring)})""")
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(''' ''', '''''')  # str.replace returns a new string; keep the result

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"""'{i}' not in list of symbols""")
        elif i in tmppbl:
            raise Exception(f"""Duplicate symbol ({i})""")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
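# --- Added worked example (not part of the original module): "POLAND" pairs up as
# P<->O, L<->A, N<->D, and the resulting mapping is symmetric.
assert _plugboard("POLAND") == {"P": "O", "O": "P", "L": "A", "A": "L", "N": "D", "D": "N"}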
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

            # else:
            #     pass
            #     Error could be also raised
            #     raise ValueError(
            #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)  # must be three *unique* rotors
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 718
|
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string column_title that represents the column title in an Excel sheet,
    return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
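# --- Added sketch (not part of the original module): the inverse mapping, assuming the
# same 1-indexed base-26 scheme ("A" -> 1, ..., "Z" -> 26, "AA" -> 27).
def column_to_excel_title(column: int) -> str:
    title = ""
    while column > 0:
        column, remainder = divmod(column - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert column_to_excel_title(28) == "AB"
assert excel_title_to_column(column_to_excel_title(701)) == 701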
if __name__ == "__main__":
from doctest import testmod
testmod()
| 523
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
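# --- Added usage sketch (not part of the original file): the ONNX config exposes
# batch/sequence dynamic axes for the two inputs declared above.
if __name__ == "__main__":
    onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="default")
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]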
| 247
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
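# --- Added usage sketch (not part of the original file): requesting specific backbone
# stages; get_aligned_output_features_output_indices fills in the matching indices.
if __name__ == "__main__":
    config = ConvNextV2Config(out_features=["stage2", "stage4"])
    assert config.out_features == ["stage2", "stage4"]
    assert list(config.out_indices) == [2, 4]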
| 247
| 1
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE_,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[str] = PegasusTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Dict:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> Any:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def a__ ( self , _a ) -> Dict:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[str]:
_A : List[Any] = '''</s>'''
_A : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def a__ ( self ) -> Dict:
_A : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(UpperCamelCase__ ) , 1103 )
def a__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self) -> None:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self) -> None:
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
_A : Optional[Any] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_A,  # the expected-encoding dict assigned above (fixture name kept as-is)
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer) -> tuple:
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self) -> None:
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
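# Note on the ids above: the large pegasus tokenizer in the first test class
# shifts raw SentencePiece pieces up by offset=103 past its reserved specials
# (hence the unk_token_id == offset + 2 == 105 assert); this BigBird variant is
# built with offset=0 in setUp, so piece ids map through directly.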
| 710
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self) -> None:
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self) -> None:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self) -> None:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
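# RagTokenizer is a thin wrapper bundling a DPR question-encoder tokenizer and a
# BART generator tokenizer; calling it tokenizes with the question encoder by
# default, which is all the two slow smoke tests above assert.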
| 54
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
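    # Both checks above verify KV-cache correctness: decoding all-but-the-last
    # token with init_cache and then feeding only the final token must match a
    # plain full-sequence forward pass to within 1e-3 on the last position.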
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self) -> None:
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self) -> None:
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self) -> None:
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self) -> None:
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 556
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self) -> None:
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self) -> None:
        self.config_tester.run_common_tests()
    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self) -> None:
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 556
| 1
|
'''simple docstring'''
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
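# Sanity check from the Project Euler 191 statement: over a 4-day period there
# are exactly 43 prize strings, i.e. solution(4) == 43 with this recurrence.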
| 702
|
def get_bound(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    """simple docstring"""
    # NOTE: the original function name was lost to identifier mangling; "get_bound"
    # is a descriptive stand-in (it returns one end of the [min_val, max_val] range).
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val
def get_avg(number_a: int, number_b: int) -> int:
    """simple docstring"""
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """simple docstring"""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)')
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value' )
    def answer(number: int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main() -> None:
    """simple docstring"""
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
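# Example run: with lower=1, higher=100, to_guess=37 the midpoint probes go
# 50 -> "high", 25 -> "low", 37 -> "same", so last_numbers ends as [50, 25, 37].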
| 102
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
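# e.g. euclidean_distance([0, 0], [3, 4]) == 5.0, and the pure-Python variant
# agrees; the benchmark below times both implementations on a small input.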
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
benchmark()
| 29
|
"""simple docstring"""
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
        current = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
        last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 29
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self) -> None:
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        # slow or fast, depending on what AutoProcessor resolves for the saved files
        self.assertIsInstance(processor.tokenizer, (GPT2Tokenizer, PreTrainedTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['''qformer_''' + key])
    def test_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
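# The processor exposes the Q-Former tokenizer's outputs under a "qformer_"
# prefix (qformer_input_ids / qformer_attention_mask) alongside the language
# tokenizer's ids and the image processor's pixel_values, as the tests assert.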
| 711
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))
def gaussian_error_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
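# x * sigmoid(1.702 * x) is the standard sigmoid approximation of GELU; e.g.
# sigmoid(np.array([0.0])) == 0.5, so the approximation passes through 0 at x=0.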
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self) -> None:
"""simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
_UpperCamelCase = "Face of a yellow cat, high resolution, sitting on a park bench"
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = 50
_UpperCamelCase = jax.device_count()
_UpperCamelCase = num_samples * [prompt]
_UpperCamelCase = num_samples * [init_image]
_UpperCamelCase = num_samples * [mask_image]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# shard inputs and rng
_UpperCamelCase = replicate(lowerCamelCase_ )
_UpperCamelCase = jax.random.split(lowerCamelCase_ , jax.device_count() )
_UpperCamelCase = shard(lowerCamelCase_ )
_UpperCamelCase = shard(lowerCamelCase_ )
_UpperCamelCase = shard(lowerCamelCase_ )
_UpperCamelCase = pipeline(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
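# The replicate/shard calls above implement the standard Flax data-parallel
# pattern: pipeline weights are replicated across local devices while per-sample
# inputs and RNG keys are split so the jitted pipeline runs one sample per device.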
| 147
|
from ..utils import DummyObject, requires_backends
# Class name reconstructed: MidiProcessor is the note_seq-gated dummy object.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        requires_backends(self, ["note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])
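# DummyObject-based placeholders like this raise a helpful error naming the
# missing optional backend ("note_seq") the moment the class is instantiated or
# its factory methods are called, instead of failing at module import time.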
| 147
| 1
|
def solution(n: int = 4_000_000) -> int:
    """simple docstring"""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
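# For the default limit of 4,000,000 this is Project Euler #2; the even
# Fibonacci terms up to the limit sum to 4613732.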
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowercase_ ( SCREAMING_SNAKE_CASE : str = "laptop" ):
"""simple docstring"""
snake_case__ : Dict =F'''https://www.amazon.in/laptop/s?k={product}'''
snake_case__ : List[str] ={
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''', attrs={'''class''': '''a-offscreen'''}).text
try:
                product_rating = item.find('''span''', attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''', attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''', ''''''))
                            - float(product_price.strip('''₹''').replace(''',''', ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''', ''''''))
                    )
                    * 100)
            except ValueError:
                discount = float('''nan''')
except AttributeError:
pass
snake_case__ : List[Any] =[
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : Tuple =''' '''
snake_case__ : Any =''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase__ = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 408
| 0
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to reach ``target`` by summing values drawn
    (with repetition) from ``array``, using naive recursion.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    # Note: the naive recursion runs in exponential time; the memoised and
    # bottom-up variants below run in O(n * target).
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as above, memoised on the remaining target."""
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count as above, computed iteratively from 0 up to ``target``."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
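# Example: with array=[1, 2, 5] and target=5 there are 9 ordered ways
# (1+1+1+1+1, four orderings of 1+1+1+2, three orderings of 1+2+2, and 5),
# so the call below prints 9.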
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 365
|
'''simple docstring'''
from math import factorial
UpperCAmelCase_ : List[str] = {str(d): factorial(d) for d in range(1_0)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    # 7 * 9! + 1 is a safe upper bound: an 8-digit number's digit-factorial sum
    # is at most 8 * 9! = 2_903_040, which has only 7 digits, so no larger
    # number can equal the sum of the factorials of its digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 365
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
__magic_name__ = True
from torch.cuda.amp import autocast
__magic_name__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
_A : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_A : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_A : Optional[bool] = field(
default=__UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_A : Optional[bool] = field(
default=__UpperCamelCase , metadata={'help': 'Whether to log verbose messages or not.'} , )
_A : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
_A : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
_A : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case__ = logging.WARNING
if model_args.verbose_logging:
snake_case__ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
snake_case__ = logging.INFO
logger.setLevel(__lowerCAmelCase )
@dataclass
class DataTrainingArguments:
_A : str = field(
default=__UpperCamelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
_A : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_A : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_A : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_A : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
_A : bool = field(
default=__UpperCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_A : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
_A : Optional[int] = field(
default=__UpperCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
_A : Optional[float] = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
def __call__( self , lowerCamelCase ):
# reformat list to dict and set to pytorch format
snake_case__ = self.feature_extractor.pad(
lowerCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
snake_case__ = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
snake_case__ = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
snake_case__ = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
snake_case__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output length indices are attended to
snake_case__ = 1
snake_case__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
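            # e.g. a row [0, 1, 0, 0, 0] (1 placed at output_length - 1) becomes
            # [1, 1, 0, 0, 0] after flip -> cumsum -> flip -> bool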
# sample randomly masked indices
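        # (`_compute_mask_indices` draws SpecAugment-style time-mask spans of length
        # `mask_time_length` with probability `mask_time_prob`, honouring the attention mask)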
snake_case__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCamelCase , min_masks=2 , )
return batch
class Wav2Vec2PreTrainer( Trainer ):
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model , inputs ):
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
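        # exponential schedule: temperature = max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)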
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def SCREAMING_SNAKE_CASE__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()
configure_logger(__lowerCAmelCase , __lowerCAmelCase )
# Downloading and loading a dataset from the hub.
snake_case__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
snake_case__ = DatasetDict()
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
snake_case__ = DatasetDict()
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
    snake_case__ = Wav2Vec2FeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__lowerCAmelCase )
def prepare_dataset(__lowerCAmelCase ):
# check that all files have the correct sampling rate
        # store the decoded waveform so the later filter/normalise steps can read batch["speech"]
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
snake_case__ = datasets.map(
__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
snake_case__ = vectorized_datasets.filter(
lambda __lowerCAmelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowerCAmelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
snake_case__ = vectorized_datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    snake_case__ = Wav2Vec2Config.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
    snake_case__ = Wav2Vec2ForPreTraining(__lowerCAmelCase )
    snake_case__ = DataCollatorForWav2Vec2Pretraining(model=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
    snake_case__ = Wav2Vec2PreTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=__lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 530
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
_A : Optional[Any] = ReformerTokenizer
_A : str = ReformerTokenizerFast
_A : List[str] = True
_A : Tuple = False
_A : str = True
def A_ ( self ):
super().setUp()
snake_case__ = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self ):
snake_case__ = "<s>"
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def A_ ( self ):
snake_case__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase ) , 10_00 )
def A_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A_ ( self ):
if not self.test_rust_tokenizer:
return
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_rust_tokenizer()
snake_case__ = "I was born in 92000, and this is falsé."
snake_case__ = tokenizer.tokenize(lowerCamelCase )
snake_case__ = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
snake_case__ = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ = self.get_rust_tokenizer()
snake_case__ = tokenizer.encode(lowerCamelCase )
snake_case__ = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A_ ( self , lowerCamelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
snake_case__ = "This is a simple input"
snake_case__ = ["This is a simple input 1", "This is a simple input 2"]
snake_case__ = ("This is a simple input", "This is a pair")
snake_case__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
def A_ ( self ):
pass
def A_ ( self ):
snake_case__ = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
snake_case__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [2_85, 46, 10, 1_70, 3_82] , )
snake_case__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case__ = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def A_ ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def A_ ( self ):
snake_case__ = "Hello World!"
snake_case__ = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def A_ ( self ):
snake_case__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case__ = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def A_ ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training, so adjust the axial position encodings from the pretrained model value of (512, 1024);
        # for axial position encodings the padded sequence length must equal the product of the two factors in `axial_pos_shape`
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def A_ ( self ):
# fmt: off
snake_case__ = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case__ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowerCamelCase , sequences=lowerCamelCase , )
| 530
| 1
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _lowercase ( UpperCAmelCase_ = "laptop"):
"""simple docstring"""
snake_case__ : Optional[Any] = F'https://www.amazon.in/laptop/s?k={product}'
snake_case__ : str = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
snake_case__ : Union[str, Any] = BeautifulSoup(requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_).text)
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""}) , ):
try:
            snake_case__ : List[Any] = item.h2.text
            snake_case__ : Optional[int] = '''https://www.amazon.in/''' + item.h2.a['''href''']
snake_case__ : Optional[int] = item.find("""span""" , attrs={"""class""": """a-offscreen"""}).text
try:
snake_case__ : Optional[int] = item.find("""span""" , attrs={"""class""": """a-icon-alt"""}).text
except AttributeError:
snake_case__ : List[Any] = '''Not available'''
try:
snake_case__ : int = (
'''₹'''
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""}).text.split("""₹""")[1]
)
except AttributeError:
snake_case__ : Union[str, Any] = ''''''
try:
snake_case__ : Any = float(
(
(
float(product_mrp.strip("""₹""").replace(""",""" , """"""))
- float(product_price.strip("""₹""").replace(""",""" , """"""))
)
/ float(product_mrp.strip("""₹""").replace(""",""" , """"""))
)
* 100)
except ValueError:
snake_case__ : Dict = float("""nan""")
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : Dict = ''' '''
snake_case__ : List[str] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_: List[Any] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict = logging.get_logger(__name__)
_A : Union[str, Any] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = "luke"
def __init__( self : int , A : Optional[int]=5_0_2_6_7 , A : Any=5_0_0_0_0_0 , A : Tuple=7_6_8 , A : List[Any]=2_5_6 , A : Any=1_2 , A : List[Any]=1_2 , A : Tuple=3_0_7_2 , A : str="gelu" , A : Optional[int]=0.1 , A : Tuple=0.1 , A : List[Any]=5_1_2 , A : Optional[int]=2 , A : Dict=0.02 , A : Union[str, Any]=1e-12 , A : Dict=True , A : Optional[Any]=None , A : Dict=1 , A : str=0 , A : int=2 , **A : Optional[int] , ) ->Optional[int]:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Dict = entity_vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[int] = entity_emb_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Optional[int] = layer_norm_eps
lowerCamelCase__ : Dict = use_entity_aware_attention
lowerCamelCase__ : Optional[int] = classifier_dropout
| 315
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowercase ( unittest.TestCase ):
def a ( self ):
snake_case_ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
snake_case_ = Vector()
def a ( self ):
snake_case_ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case ) , '(0,0,0,0,0,1)' )
def a ( self ):
snake_case_ = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case ) , 4 )
def a ( self ):
snake_case_ = Vector([1, 2] )
snake_case_ = Vector([1, 2, 3, 4, 5] )
snake_case_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
snake_case_ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def a ( self ):
snake_case_ = Vector([1, 2, 3] )
snake_case_ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def a ( self ):
snake_case_ = Vector([1, 2, 3] )
snake_case_ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def a ( self ):
snake_case_ = Vector([1, 2, 3] )
snake_case_ = Vector([2, -1, 4] ) # for test of dot product
snake_case_ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
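        # a · b == 2*1 + (-1)*(-2) + 4*(-1) == 0, i.e. the two vectors are orthogonal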
def a ( self ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def a ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def a ( self ):
snake_case_ = Vector([1, 2, 3] )
snake_case_ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case , snake_case ) ) , '(3,4,7)' )
def a ( self ):
snake_case_ = Vector([1, 0, 0, 0, 0, 0] )
snake_case_ = x.copy()
self.assertEqual(str(snake_case ) , str(snake_case ) )
def a ( self ):
snake_case_ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case ) , '(0,1,0)' )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(snake_case ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case , snake_case ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case , snake_case ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
snake_case_ = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
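        # e.g. first component of a * x: 1*1 + 2*2 + 3*3 == 14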
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(snake_case ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(a.component(2 , 1 ) , 7 , delta=0.01 )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def a ( self ):
snake_case_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def a ( self ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 108
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_UpperCAmelCase : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_UpperCAmelCase : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
_UpperCAmelCase : Tuple = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
_UpperCAmelCase : Union[str, Any] = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
_UpperCAmelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
_UpperCAmelCase : List[str] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 108
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_snake_case : Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
__snake_case : int = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=__lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=__lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=__lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=__lowerCamelCase , default=1_0_0_0 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=__lowerCamelCase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=__lowerCamelCase , type=__lowerCamelCase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=__lowerCamelCase , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=__lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
__snake_case : List[str] = parser.parse_args()
return args
def lowerCAmelCase_ ( __lowerCamelCase ):
def fn(__lowerCamelCase ):
return tokenizer(examples["text"] )
return fn
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = []
for i in range(len(tokenized_data["input_ids"] ) ):
__snake_case : Tuple = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
__snake_case : List[Any] = tf.train.Features(feature=__lowerCamelCase )
__snake_case : str = tf.train.Example(features=__lowerCamelCase )
__snake_case : List[str] = example.SerializeToString()
records.append(__lowerCamelCase )
return records
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Optional[int] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__snake_case : Optional[Any] = min(len(__lowerCamelCase ) , args.limit )
__snake_case : Dict = dataset.select(range(__lowerCamelCase ) )
print(F'Limiting the dataset to {args.limit} entries.' )
__snake_case : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__snake_case : Dict = os.path.join(args.output_dir , args.split )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
__snake_case : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__snake_case : Any = tokenize_function(__lowerCamelCase )
__snake_case : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
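    # Example: with max_length=512, 1_300 concatenated tokens keep (1_300 // 512) * 512 = 1_024
    # tokens, i.e. two full chunks; the trailing 276 tokens are dropped.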
def group_texts(__lowerCamelCase ):
# Concatenate all texts.
__snake_case : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()}
__snake_case : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__snake_case : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__snake_case : int = {
k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__snake_case : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=1_0_0_0 , num_proc=4 )
__snake_case : Optional[Any] = 0
__snake_case : Optional[Any] = 0
for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ):
__snake_case : List[str] = grouped_dataset[shard : shard + args.shard_size]
__snake_case : Any = len(dataset_snapshot["input_ids"] )
__snake_case : List[Any] = os.path.join(__lowerCamelCase , F'dataset-{shard_count}-{records_containing}.tfrecord' )
__snake_case : Optional[Any] = get_serialized_examples(__lowerCamelCase )
with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file:
for i in range(len(__lowerCamelCase ) ):
__snake_case : Union[str, Any] = serialized_examples[i]
out_file.write(__lowerCamelCase )
print("Wrote file {} containing {} records".format(__lowerCamelCase , __lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
print(F'Total {args.split} records: {total_records}' , file=__lowerCamelCase )
if __name__ == "__main__":
_snake_case : List[Any] = parse_args()
main(args)
| 81
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ : Optional[Any] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 295
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Optional[int]:
def decorator(lowerCamelCase__ : int ):
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCamelCase__, "handle_key", [] )
handle += [key]
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
def _lowerCAmelCase ( *lowerCamelCase__ : List[str] ) -> Tuple:
def decorator(lowerCamelCase__ : Dict ):
_SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCamelCase__, "handle_key", [] )
handle += keys
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
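# The two decorators above tag handler functions with a `handle_key` attribute;
# the KeyHandler metaclass below collects those tagged methods into a key -> handler map.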
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __new__( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ )
if not hasattr(snake_case__ , "key_handler" ):
setattr(snake_case__ , "key_handler" , {} )
setattr(snake_case__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(snake_case__ , "handle_key" , [] )
for key in handled_keys:
_SCREAMING_SNAKE_CASE : Tuple = value
return new_cls
@staticmethod
def __SCREAMING_SNAKE_CASE ( cls ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = get_character()
if char != KEYMAP["undefined"]:
_SCREAMING_SNAKE_CASE : Dict = ord(snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = cls.key_handler.get(snake_case__ )
if handler:
_SCREAMING_SNAKE_CASE : Optional[int] = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls : List[Any] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 295
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = MBartConfig
_UpperCAmelCase = {}
_UpperCAmelCase = '''gelu'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=False , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case=0.1 , snake_case=0.1 , snake_case=20 , snake_case=2 , snake_case=1 , snake_case=0 , ) -> List[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_mbart_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[str]:
_UpperCAmelCase = TFMBartModel(config=__UpperCamelCase ).get_decoder()
_UpperCAmelCase = inputs_dict['input_ids']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
_UpperCAmelCase = inputs_dict['head_mask']
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
_UpperCAmelCase = past_key_values[1]
def UpperCAmelCase ( A : Any , A : List[Any] , A : List[Any] , A : Optional[Any]=None , A : Any=None , A : Optional[Any]=None , A : int=None , A : Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
        _UpperCAmelCase = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase__ ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_UpperCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = TFMBartModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
_UpperCAmelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
_UpperCAmelCase = '''facebook/mbart-large-en-ro'''
@cached_property
def lowerCamelCase_ ( self ) -> List[str]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self , **snake_case ) -> int:
_UpperCAmelCase = self.translate_src_text(**__UpperCamelCase )
self.assertListEqual(self.expected_text , __UpperCamelCase )
def lowerCamelCase_ ( self , **snake_case ) -> Optional[int]:
_UpperCAmelCase = self.tokenizer(self.src_text , **__UpperCamelCase , return_tensors='tf' )
_UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_UpperCAmelCase = self.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase_ ( self ) -> Tuple:
self._assert_generated_batch_equal_expected()
| 573
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
A = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 187
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : int=13 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Any=99 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : List[Any]=32 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : Any=2 , __UpperCamelCase : str=0.02 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Tuple="last" , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Any=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_lengths
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_special
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = summary_type
_UpperCAmelCase = use_proj
_UpperCAmelCase = scope
_UpperCAmelCase = bos_token_id
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_input_lengths:
_UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president ... (the two-token phrase repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
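# Usage note (added; the file path follows the usual transformers test layout and is an
# assumption here): these tests are normally collected by pytest, e.g.
#   python -m pytest tests/models/xlm/test_modeling_xlm.py -k "xlm"
# and the @slow-marked tests additionally require RUN_SLOW=1 in the environment.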
| 718
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = "sshleifer/bart-tiny-random"
__lowerCAmelCase = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Tuple ):
return AutoConfig.from_pretrained(__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=__UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase__ ( self : str ):
with self.assertRaises(__UpperCamelCase ):
create_student_by_copying_alternating_layers(__UpperCamelCase , tempfile.mkdtemp() , e=__UpperCamelCase , d=__UpperCamelCase )
| 129
| 0
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
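# Usage sketch (added note, not part of the original module): after registering one of
# these filesystems with fsspec, a compressed file reads as a one-file archive, e.g.:
#
#   import fsspec
#   fsspec.register_implementation(GzipFileSystem.protocol, GzipFileSystem, clobber=True)
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "r") as f:
#       text = f.read()  # opened "rb" internally; TextIOWrapper makes "r" work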
| 95
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
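# Usage sketch (added note): the processor runs the image processor (including its OCR)
# and the tokenizer in a single call. The checkpoint name and image file are assumptions:
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   # encoding contains: input_ids, attention_mask, bbox, pixel_values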
| 138
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort a mutable collection in place using patience sort.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
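# Worked trace (added note): for [5, 1, 4, 2] the piles evolve as
#   5 -> [5]
#   1 -> [5, 1]          (bisect_left finds the first pile whose top is >= 1)
#   4 -> [5, 1] [4]      (no existing pile top can take 4, so a new pile is started)
#   2 -> [5, 1] [4, 2]
# Reversing each pile yields the ascending runs [1, 5] and [2, 4], which
# heapq.merge interleaves into the sorted result [1, 2, 4, 5].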
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 325
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
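# Usage sketch (added note, not part of the original module): dataset tests substitute this
# mock for a real DownloadManager; the dataset name and url below are illustrative only:
#
#   dl_manager = MockDownloadManager(dataset_name="squad", config=None, version="1.0.0")
#   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
#   # -> {"train": os.path.join(dl_manager.dummy_file, "train.json")}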
| 325
| 1
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
| 671
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
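# Usage sketch (added note): the token-level F1 and exact-match helpers operate on
# normalized text (lowercased, punctuation and articles stripped), e.g.:
#
#   f1_score("The Eiffel Tower", "eiffel tower")   # -> 1.0 after normalization
#   exact_match_score("Paris!", "paris")           # -> True
#   calculate_exact_match(["paris"], ["Paris."])   # -> {"em": 1.0}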
| 713
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 450
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
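# Added note: _LazyModule defers the framework-specific imports declared above until an
# attribute is first accessed, so importing the package stays cheap. For example (the
# module path shown is the usual location of this file and is an assumption here):
#
#   from transformers.models.roberta import RobertaConfig  # no torch import happens yet
#   from transformers.models.roberta import RobertaModel   # now the torch-backed module loads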
| 258
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error (hypothesis value minus actual output) for a given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Hypothesis value (predicted output) for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the given example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of cost-derivative terms over the training examples.

    index = -1 corresponds to the bias parameter, whose feature is implicitly 1.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the cost w.r.t. parameter `index`, averaged over m examples."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
SCREAMING_SNAKE_CASE : Tuple =0.000002
SCREAMING_SNAKE_CASE : Optional[Any] =0
SCREAMING_SNAKE_CASE : Tuple =0
while True:
j += 1
SCREAMING_SNAKE_CASE : List[str] =[0, 0, 0, 0]
for i in range(0 , len(__a ) ):
SCREAMING_SNAKE_CASE : Tuple =get_cost_derivative(i - 1 )
SCREAMING_SNAKE_CASE : Tuple =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__a , __a , atol=__a , rtol=__a , ):
break
SCREAMING_SNAKE_CASE : Union[str, Any] =temp_parameter_vector
print(('''Number of iterations:''', j) )
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
for i in range(len(__a ) ):
print(('''Actual output value:''', output(__a , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(__a , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
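# An equivalent vectorized sketch of the update loop above, with a leading
# bias column in the design matrix. It mirrors train_data and the initial
# parameter_vector defined at the top of this script; the function name is
# illustrative, not part of the original module.
def vectorized_gradient_descent(learning_rate=0.009, atol=0.000002):
    x = numpy.array(
        [[1, 5, 2, 3], [1, 6, 5, 9], [1, 11, 12, 13], [1, 1, 1, 1], [1, 11, 12, 13]],
        dtype=float,
    )
    y = numpy.array([15, 25, 41, 8, 41], dtype=float)
    theta = numpy.array([2, 4, 1, 5], dtype=float)
    while True:
        gradient = x.T @ (x @ theta - y) / len(y)  # one derivative term per parameter
        new_theta = theta - learning_rate * gradient
        if numpy.allclose(theta, new_theta, atol=atol, rtol=0):
            return new_theta
        theta = new_theta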
| 258
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 508
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowercase__( __UpperCamelCase: bytes ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
SCREAMING_SNAKE_CASE : Optional[Any] = 'f32le'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__UpperCamelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(__UpperCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
SCREAMING_SNAKE_CASE : Union[str, Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(__UpperCamelCase ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{sampling_rate}"
SCREAMING_SNAKE_CASE : str = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
SCREAMING_SNAKE_CASE : Optional[Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : List[str] = 'alsa'
SCREAMING_SNAKE_CASE : str = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Dict = 'avfoundation'
SCREAMING_SNAKE_CASE : int = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE : str = 'dshow'
SCREAMING_SNAKE_CASE : Any = 'default'
SCREAMING_SNAKE_CASE : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = _ffmpeg_stream(__UpperCamelCase ,__UpperCamelCase )
for item in iterator:
yield item
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[Union[Tuple[float, float], float]] = None ,__UpperCamelCase: str = "f32le" ,):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Any = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : Dict = chunk_length_s
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_microphone(__UpperCamelCase ,__UpperCamelCase ,format_for_conversion=__UpperCamelCase )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Optional[int] = np.floataa
SCREAMING_SNAKE_CASE : List[Any] = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase ,(int, float) ):
SCREAMING_SNAKE_CASE : str = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase ,__UpperCamelCase ,stride=(stride_left, stride_right) ,stream=__UpperCamelCase ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : List[Any] = np.frombuffer(item['raw'] ,dtype=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Tuple[int, int] ,__UpperCamelCase: bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = B''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCamelCase ) < chunk_len:
SCREAMING_SNAKE_CASE : Tuple = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : Optional[int] = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
SCREAMING_SNAKE_CASE : Optional[int] = False
yield item
SCREAMING_SNAKE_CASE : List[Any] = stride_left
SCREAMING_SNAKE_CASE : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
SCREAMING_SNAKE_CASE : Tuple = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : List[Any] = False
yield item
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 2**24 # 16Mo
try:
with subprocess.Popen(__UpperCamelCase ,stdout=subprocess.PIPE ,bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : Any = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
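# A standalone sketch of the same decode path for a file on disk, assuming
# ffmpeg is available on PATH; "sample.wav" is a placeholder filename.
def decode_file_to_float32(path="sample.wav", sampling_rate=16_000):
    with open(path, "rb") as f:
        raw_bytes = f.read()
    command = [
        "ffmpeg", "-i", "pipe:0", "-ac", "1", "-ar", str(sampling_rate),
        "-f", "f32le", "-hide_banner", "-loglevel", "quiet", "pipe:1",
    ]
    with subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as process:
        output_bytes, _ = process.communicate(raw_bytes)
    return np.frombuffer(output_bytes, dtype=np.float32)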
| 508
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
snake_case__ : List[str] = TypeVar("""T""")
def _snake_case (__lowercase):
return (position - 1) // 2
def _snake_case (__lowercase):
return (2 * position) + 1
def _snake_case (__lowercase):
return (2 * position) + 2
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase_ = []
UpperCamelCase_ = {}
UpperCamelCase_ = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def _UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCamelCase_ = self.elements
self.elements += 1
self._bubble_up(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase_ , UpperCamelCase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase_ , UpperCamelCase_ = self.heap[0]
self._bubble_down(_UpperCAmelCase )
return elem
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Update the weight of the given key
UpperCamelCase_ = self.position_map[elem]
UpperCamelCase_ = (elem, weight)
if position > 0:
UpperCamelCase_ = get_parent_position(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCamelCase_ = self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase_ = get_parent_position(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos]
UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_up(_UpperCAmelCase )
return None
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCamelCase_ = self.position_map[elem]
UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos]
UpperCamelCase_ = get_child_left_position(_UpperCAmelCase )
UpperCamelCase_ = get_child_right_position(_UpperCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position]
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
if child_left_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
else:
return None
if child_right_position < self.elements:
UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
return None
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Swap the nodes at the given positions
UpperCamelCase_ = self.heap[nodea_pos][0]
UpperCamelCase_ = self.heap[nodea_pos][0]
UpperCamelCase_ , UpperCamelCase_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCamelCase_ = nodea_pos
UpperCamelCase_ = nodea_pos
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase_ = {}
UpperCamelCase_ = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        # Add a node to the graph if it is not already present
if node not in self.connections:
UpperCamelCase_ = {}
self.nodes += 1
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(_UpperCAmelCase )
self.add_node(_UpperCAmelCase )
UpperCamelCase_ = weight
UpperCamelCase_ = weight
def _snake_case (__lowercase , ):
UpperCamelCase_ = {node: maxsize for node in graph.connections}
UpperCamelCase_ = {node: None for node in graph.connections}
UpperCamelCase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowercase , __lowercase)
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCamelCase_ = priority_queue.extract_min()
UpperCamelCase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowercase , dist[neighbour])
UpperCamelCase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCamelCase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowercase , dist[neighbour])
UpperCamelCase_ = node
return dist, parent
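# An independent worked example of the same minimum-spanning-tree idea using
# the standard library's heapq on a 3-node graph; it does not drive the
# (obfuscated) classes above. For the sample graph the MST weight is 1 + 2 = 3.
def prim_mst_weight(adjacency, start):
    import heapq

    visited, heap, total = {start}, [], 0
    for neighbour, weight in adjacency[start].items():
        heapq.heappush(heap, (weight, neighbour))
    while heap and len(visited) < len(adjacency):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue
        visited.add(node)
        total += weight
        for neighbour, next_weight in adjacency[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (next_weight, neighbour))
    return total


# prim_mst_weight({"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}, "a") == 3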
| 23
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class UpperCAmelCase ( _snake_case ):
UpperCAmelCase = "imagegpt"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , __lowerCamelCase : List[Any]=5_1_2 + 1 , __lowerCamelCase : Dict=3_2 * 3_2 , __lowerCamelCase : List[str]=5_1_2 , __lowerCamelCase : List[Any]=2_4 , __lowerCamelCase : Any=8 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any="quick_gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Dict=False , **__lowerCamelCase : str , ):
UpperCAmelCase__ :Dict = vocab_size
UpperCAmelCase__ :str = n_positions
UpperCAmelCase__ :Tuple = n_embd
UpperCAmelCase__ :Dict = n_layer
UpperCAmelCase__ :List[Any] = n_head
UpperCAmelCase__ :str = n_inner
UpperCAmelCase__ :Optional[Any] = activation_function
UpperCAmelCase__ :str = resid_pdrop
UpperCAmelCase__ :Optional[Any] = embd_pdrop
UpperCAmelCase__ :Tuple = attn_pdrop
UpperCAmelCase__ :int = layer_norm_epsilon
UpperCAmelCase__ :List[Any] = initializer_range
UpperCAmelCase__ :List[Any] = scale_attn_weights
UpperCAmelCase__ :List[str] = use_cache
UpperCAmelCase__ :Tuple = scale_attn_by_inverse_layer_idx
UpperCAmelCase__ :Union[str, Any] = reorder_and_upcast_attn
UpperCAmelCase__ :List[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase )
class UpperCAmelCase ( _snake_case ):
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __SCREAMING_SNAKE_CASE ( self : Dict , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 3_2 , __lowerCamelCase : int = 3_2 , ):
UpperCAmelCase__ :Tuple = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
UpperCAmelCase__ :Dict = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return inputs
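# A tiny sketch of what the attribute_map above provides: canonical names such
# as hidden_size resolve to the GPT-2-style fields (n_embd, n_layer, ...).
# AliasedConfig is an illustration only, not the real PretrainedConfig machinery.
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=512, n_layer=24):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


# AliasedConfig().hidden_size == 512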
| 467
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a : List[Any] = 0
_a : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a : Dict = tuple[int, int]
class a_ :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node | None , ):
"""simple docstring"""
snake_case : str = pos_x
snake_case : int = pos_y
snake_case : int = (pos_y, pos_x)
snake_case : List[str] = goal_x
snake_case : Tuple = goal_y
snake_case : str = g_cost
snake_case : Optional[Any] = parent
snake_case : Dict = self.calculate_heuristic()
snake_case : Dict = self.g_cost + self.h_cost
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : Optional[Any] = self.pos_x - self.goal_x
snake_case : List[Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCAmelCase__ ) + abs(UpperCAmelCase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[int] , UpperCAmelCase__ : Node ):
"""simple docstring"""
return self.f_cost < other.f_cost
class a_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : TPosition , UpperCAmelCase__ : TPosition ):
"""simple docstring"""
snake_case : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase__ )
snake_case : Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , UpperCAmelCase__ )
snake_case : Tuple = [self.start]
snake_case : list[Node] = []
snake_case : Tuple = False
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case : Tuple = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCAmelCase__ )
self.closed_nodes.append(UpperCAmelCase__ )
snake_case : str = self.get_successors(UpperCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCAmelCase__ )
else:
# retrieve the best current path
snake_case : Tuple = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCAmelCase__ )
else:
self.open_nodes.append(UpperCAmelCase__ )
return [self.start.pos]
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Node ):
"""simple docstring"""
snake_case : Dict = []
for action in delta:
snake_case : List[Any] = parent.pos_x + action[1]
snake_case : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase__ , ) )
return successors
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : Node | None ):
"""simple docstring"""
snake_case : Any = node
snake_case : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Dict = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self : str , UpperCAmelCase__ : TPosition , UpperCAmelCase__ : TPosition ):
"""simple docstring"""
snake_case : str = AStar(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : str = AStar(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Dict = False
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
snake_case : Dict = self.fwd_astar.open_nodes.pop(0 )
snake_case : Tuple = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCAmelCase__ , UpperCAmelCase__ )
self.fwd_astar.closed_nodes.append(UpperCAmelCase__ )
self.bwd_astar.closed_nodes.append(UpperCAmelCase__ )
snake_case : int = current_bwd_node
snake_case : Dict = current_fwd_node
snake_case : List[Any] = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCAmelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCAmelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCAmelCase__ )
else:
# retrieve the best current path
snake_case : str = astar.open_nodes.pop(
astar.open_nodes.index(UpperCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCAmelCase__ )
else:
astar.open_nodes.append(UpperCAmelCase__ )
return [self.fwd_astar.start.pos]
def lowerCAmelCase( self : Any , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ):
"""simple docstring"""
snake_case : Union[str, Any] = self.fwd_astar.retrace_path(UpperCAmelCase__ )
snake_case : List[str] = self.bwd_astar.retrace_path(UpperCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
snake_case : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a : Dict = (0, 0)
_a : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a : int = time.time()
_a : int = AStar(init, goal)
_a : List[Any] = a_star.search()
_a : List[Any] = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
_a : Optional[int] = time.time()
_a : str = BidirectionalAStar(init, goal)
_a : Any = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 84
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
A__ : List[str] = ['image_processor', 'tokenizer']
A__ : Any = 'CLIPImageProcessor'
A__ : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[int] ):
"""simple docstring"""
snake_case : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase__ , )
snake_case : List[Any] = kwargs.pop('''feature_extractor''' )
snake_case : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Any , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case : int = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if images is not None:
snake_case : Dict = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and images is not None:
snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ )
def lowerCAmelCase( self : List[str] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : int = self.tokenizer.model_input_names
snake_case : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase__ , )
return self.image_processor_class
@property
def lowerCAmelCase( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase__ , )
return self.image_processor
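# A short usage sketch for a processor like the one above, assuming the
# standard public CLIP checkpoint and the stock transformers CLIPProcessor;
# "cat.png" is a placeholder path.
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.open("cat.png")
#   inputs = processor(text=["a photo of a cat"], images=image,
#                      return_tensors="pt", padding=True)
#   # inputs now holds input_ids, attention_mask and pixel_values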
| 84
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def snake_case_ (__A : Dict ) -> List[Any]:
__lowerCAmelCase : Any = """huggingface/label-files"""
__lowerCAmelCase : List[Any] = """imagenet-1k-id2label.json"""
__lowerCAmelCase : Dict = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase : List[str] = {int(__A ): v for k, v in idalabel.items()}
__lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
__lowerCAmelCase : Tuple = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__lowerCAmelCase : List[Any] = BitConfig(
conv_layer=__A , num_labels=1_0_0_0 , idalabel=__A , labelaid=__A , )
return config
def snake_case_ (__A : Dict ) -> str:
if "stem.conv" in name:
__lowerCAmelCase : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
__lowerCAmelCase : str = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
__lowerCAmelCase : str = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
__lowerCAmelCase : List[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
__lowerCAmelCase : Optional[int] = """bit.encoder.""" + name
return name
def snake_case_ () -> Optional[int]:
__lowerCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase : str = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def snake_case_ (__A : Optional[int] , __A : List[str] , __A : Any=False ) -> str:
__lowerCAmelCase : int = get_config(__A )
# load original model from timm
__lowerCAmelCase : Any = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
__lowerCAmelCase : List[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
__lowerCAmelCase : Dict = state_dict.pop(__A )
__lowerCAmelCase : str = val.squeeze() if """head""" in key else val
# load HuggingFace model
__lowerCAmelCase : Dict = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
__lowerCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=__A ) )
__lowerCAmelCase : Optional[Any] = transform.transforms
__lowerCAmelCase : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
__lowerCAmelCase : int = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__lowerCAmelCase : str = prepare_img()
__lowerCAmelCase : str = transform(__A ).unsqueeze(0 )
__lowerCAmelCase : Optional[int] = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
__lowerCAmelCase : Dict = model(__A )
__lowerCAmelCase : List[Any] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
__lowerCAmelCase : List[Any] = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
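# Example invocation using only the flags defined above; the script filename
# is a placeholder:
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump \
#       --push_to_hub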
| 651
|
import math
def snake_case_ (__A : int = 1_0_0 ) -> int:
__lowerCAmelCase : List[str] = sum(i * i for i in range(1 , n + 1 ) )
__lowerCAmelCase : int = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
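# Worked check for n = 10: (1 + 2 + ... + 10)^2 = 55^2 = 3025, while
# 1^2 + 2^2 + ... + 10^2 = 385, so the difference is 3025 - 385 = 2640.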
| 651
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__lowerCamelCase )] )
__a = np.array(__lowerCamelCase )
__a = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __lowerCamelCase ) ) , x.transpose() ) , __lowerCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = (1, 2, 1)
__a = (1, 1, 0, 7)
__a = SARIMAX(
__lowerCamelCase , exog=__lowerCamelCase , order=__lowerCamelCase , seasonal_order=__lowerCamelCase )
__a = model.fit(disp=__lowerCamelCase , maxiter=600 , method='nm' )
__a = model_fit.predict(1 , len(__lowerCamelCase ) , exog=[test_match] )
return result[0]
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__lowerCamelCase , __lowerCamelCase )
__a = regressor.predict(__lowerCamelCase )
return y_pred[0]
def lowerCAmelCase( __lowerCamelCase ):
train_user.sort()
__a = np.percentile(__lowerCamelCase , 25 )
__a = np.percentile(__lowerCamelCase , 75 )
__a = qa - qa
__a = qa - (iqr * 0.1)
return low_lim
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
__a = 0
__a = 0
for i in list_vote:
if i > actual_result:
__a = not_safe + 1
else:
if abs(abs(__lowerCamelCase ) - abs(__lowerCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowerCamelCase_ : str = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
lowerCamelCase_ : List[str] = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowerCamelCase_ : Optional[int] = Normalizer().fit_transform(data_input_df.values)
# split data
lowerCamelCase_ : int = normalize_df[:, 2].tolist()
lowerCamelCase_ : str = normalize_df[:, 0].tolist()
lowerCamelCase_ : Any = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowerCamelCase_ : int = normalize_df[:, [1, 2]].tolist()
lowerCamelCase_ : str = x[: len(x) - 1]
lowerCamelCase_ : str = x[len(x) - 1 :]
# for linear regression & sarimax
lowerCamelCase_ : Dict = total_date[: len(total_date) - 1]
lowerCamelCase_ : Tuple = total_user[: len(total_user) - 1]
lowerCamelCase_ : Dict = total_match[: len(total_match) - 1]
lowerCamelCase_ : str = total_date[len(total_date) - 1 :]
lowerCamelCase_ : Tuple = total_user[len(total_user) - 1 :]
lowerCamelCase_ : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowerCamelCase_ : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowerCamelCase_ : Union[str, Any] = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 246
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
lowerCamelCase_ , lowerCamelCase_ : Optional[int] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
lowerCamelCase_ : Dict = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
lowerCamelCase_ : Optional[int] = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase_ : Union[str, Any] = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 246
| 1
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'''allenai/led-base-16384''': 16384,
}
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase : Dict =VOCAB_FILES_NAMES
_UpperCAmelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] =LEDTokenizer
_UpperCAmelCase : Dict =["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]="replace" , lowerCAmelCase : int="<s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : List[str]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : Union[str, Any]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Any=False , lowerCAmelCase : Dict=True , **lowerCAmelCase : str , ):
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
A_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase ) != add_prefix_space:
A_ = getattr(lowerCAmelCase , pre_tok_state.pop("type" ) )
A_ = add_prefix_space
A_ = pre_tok_class(**lowerCAmelCase )
A_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ = "post_processor"
A_ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase )
if tokenizer_component_instance:
A_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ = tuple(state["sep"] )
if "cls" in state:
A_ = tuple(state["cls"] )
A_ = False
if state.get("add_prefix_space" , lowerCAmelCase ) != add_prefix_space:
A_ = add_prefix_space
A_ = True
if state.get("trim_offsets" , lowerCAmelCase ) != trim_offsets:
A_ = trim_offsets
A_ = True
if changes_to_apply:
A_ = getattr(lowerCAmelCase , state.pop("type" ) )
A_ = component_class(**lowerCAmelCase )
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : Optional[int] ):
A_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else value
A_ = value
def _UpperCAmelCase ( self : Optional[int] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] ):
A_ = kwargs.get("is_split_into_words" , lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any] ):
A_ = kwargs.get("is_split_into_words" , lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None ):
A_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[bool] = None , ):
A_ = super()._pad(
encoded_inputs=lowerCAmelCase , max_length=lowerCAmelCase , padding_strategy=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
A_ = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ = len(encoded_inputs["global_attention_mask"] ) != len(lowerCAmelCase )
if needs_to_be_padded:
A_ = len(lowerCAmelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A_ = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
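# A plain-Python sketch of the extra step above: the global_attention_mask is
# padded to the final input length with -1 (0 already means "local attention",
# so -1 is reserved for padding positions). Names here are illustrative.
def pad_global_attention_mask(global_attention_mask, padded_length, padding_side="right"):
    difference = padded_length - len(global_attention_mask)
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    return [-1] * difference + global_attention_mask


# pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]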
| 452
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def a_ ( UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
A_ = json.loads(f.read() )
A_ = collections.OrderedDict()
A_ = collections.OrderedDict()
A_ = collections.OrderedDict()
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(UpperCamelCase_ ):
A_ = b
A_ = idx
for wd in b:
A_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase : str =VOCAB_FILES_NAMES
_UpperCAmelCase : str =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] =["input_ids", "attention_mask"]
def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]="<|endoftext|>" , lowerCAmelCase : Optional[int]="<|endoftext|>" , lowerCAmelCase : Tuple="<|startoftext|>" , lowerCAmelCase : Union[str, Any]="<|endoftext|>" , lowerCAmelCase : List[Any]=False , **lowerCAmelCase : Optional[Any] , ):
super().__init__(
unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , do_clean_text=lowerCAmelCase , **lowerCAmelCase , )
if not os.path.isfile(lowerCAmelCase ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(lowerCAmelCase ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
A_ = do_clean_text
A_ , A_ , A_ , A_ = load_vocab_and_emoji(lowerCAmelCase , lowerCAmelCase )
A_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _UpperCAmelCase ( self : str ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large vocabulary
return len(self.raw_vocab )
def _UpperCAmelCase ( self : Optional[int] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple ):
return self.subword_tokenizer.tokenize(lowerCAmelCase , clean=self.do_clean_text )
def _UpperCAmelCase ( self : str , lowerCAmelCase : int ):
return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) )
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str] ):
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase )
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] ):
A_ = "".join(lowerCAmelCase ).strip()
return out_string
def _UpperCAmelCase ( self : Any , lowerCAmelCase : "Conversation" ):
A_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
A_ = input_ids[-self.model_max_length :]
return input_ids
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
A_ = 0
if os.path.isdir(lowerCAmelCase ):
A_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
A_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
A_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
A_ = token_index
writer.write(",".join(lowerCAmelCase ) + "\n" )
index += 1
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , lowerCAmelCase )
return vocab_file, emoji_file
class __lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
A_ = vocab # same as swe
A_ = ids_to_tokens # same as bpe
A_ = emoji
A_ = np.max([len(lowerCAmelCase ) for w in self.vocab.keys()] )
A_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
A_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
A_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
A_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
A_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
A_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
A_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Union[str, Any] ):
return len(self.ids_to_tokens )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : int ):
A_ = self.content_repattera.sub("<URL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<EMAIL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<TEL>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<DATE>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<DATE>" , lowerCAmelCase )
A_ = self.content_repattera.sub("<PRICE>" , lowerCAmelCase )
A_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def _UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=False ):
A_ = text.replace(" " , "<SP>" )
A_ = text.replace(" " , "<SP>" )
A_ = text.replace("\r\n" , "<BR>" )
A_ = text.replace("\n" , "<BR>" )
A_ = text.replace("\r" , "<BR>" )
A_ = text.replace("\t" , "<TAB>" )
A_ = text.replace("—" , "ー" )
A_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
A_ = text.replace(lowerCAmelCase , lowerCAmelCase )
if clean:
A_ = self.clean_text(lowerCAmelCase )
def check_simbol(lowerCAmelCase : Tuple ):
A_ = x.encode()
if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 2:
A_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(lowerCAmelCase : Tuple ):
A_ = x.encode()
if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 3:
A_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_8080 and c <= 0xE2_B07F:
return True
return False
A_ = 0
A_ = []
while pos < len(lowerCAmelCase ):
A_ = min(len(lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
A_ = [] # (token_id, token, pos)
for e in range(lowerCAmelCase , lowerCAmelCase , -1 ):
A_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase ) > 2:
A_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase ) > 0:
# the smallest token_id is adopted
A_ , A_ , A_ = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[0] )[0]
result.append(lowerCAmelCase )
A_ = e
else:
A_ = pos + 1
A_ = text[pos:end]
if check_simbol(lowerCAmelCase ):
result.append("<KIGOU>" )
elif checkuae(lowerCAmelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
A_ = end
return result
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any]="\n" ):
A_ = []
A_ = []
A_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase ) > 0:
words.append(bytearray(lowerCAmelCase ).decode("utf-8" , errors="replace" ) )
A_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
words.append(bytearray(lowerCAmelCase ).decode("utf-8" , errors="replace" ) )
A_ = "".join(lowerCAmelCase )
return text
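# A minimal sketch of the byte-fallback scheme used above: characters with no
# vocabulary entry are emitted as "<|byte%d|>" tokens and reassembled as UTF-8
# on decode. Standalone illustration, not a call into the tokenizer class.
def bytes_roundtrip(text):
    tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
    raw = bytearray(int(token[6:-2]) for token in tokens)
    return raw.decode("utf-8", errors="replace")


# bytes_roundtrip("日本語") == "日本語"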
| 452
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , _a , )
super().__init__(*_a , **_a )
| 721
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 601
| 0
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates the train/validation/test `DataLoader`s for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
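# --- Editor's aside: a small, hedged sketch of the fold-ensembling step above in isolation.
# Summing (or averaging) per-fold logits before the argmax is a soft vote; the data here is
# made up purely for illustration.
import torch

fold_logits = [torch.randn(8, 2) for _ in range(3)]  # 3 folds, 8 test examples, 2 classes
stacked = torch.stack(fold_logits, dim=0)            # (num_folds, num_examples, num_classes)
ensembled = stacked.mean(dim=0).argmax(dim=-1)       # soft vote: average logits, then argmax
print(ensembled.shape)  # torch.Size([8])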
| 22
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base85 bytes."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
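# --- Editor's aside (sketch, not from the original file): Base85 packs 4 bytes into 5 ASCII
# characters, so it is denser than Base64's 3-into-4 expansion.
import base64

payload = b"\x00\x01\x02\x03" * 25  # 100 bytes
print(len(base64.b85encode(payload)))  # 125 characters (5/4 overhead)
print(len(base64.b64encode(payload)))  # 136 characters (4/3 overhead, padded)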
| 139
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    # MNIST/IDX headers are big-endian unsigned 32-bit integers
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
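# --- Editor's aside: a self-contained sketch (illustrative only) of the big-endian header
# parsing done by _read32 above. IDX files start with a uint32 magic number followed by
# uint32 dimensions; here we fabricate a tiny header and parse it back.
import io
import struct

import numpy

fake_idx = struct.pack(">IIII", 2051, 2, 28, 28) + bytes(2 * 28 * 28)  # header + 2 blank images
stream = io.BytesIO(fake_idx)
dt = numpy.dtype(numpy.uint32).newbyteorder(">")
magic, n, rows, cols = (numpy.frombuffer(stream.read(4), dtype=dt)[0] for _ in range(4))
print(magic, n, rows, cols)  # 2051 2 28 28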
| 368
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
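# --- Editor's aside: circular traversal like the list above is the natural fit for
# round-robin problems. This independent, hedged sketch solves the Josephus problem with
# collections.deque, which models the same "wrap around" behaviour.
from collections import deque


def josephus(n: int, k: int) -> int:
    """Return the (1-indexed) survivor when every k-th of n people in a circle is removed."""
    circle = deque(range(1, n + 1))
    while len(circle) > 1:
        circle.rotate(-(k - 1))  # skip k-1 people
        circle.popleft()  # remove the k-th
    return circle[0]


assert josephus(7, 3) == 4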
| 467
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
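# --- Editor's aside: a short, hedged illustration of the reproducibility pattern the tests
# above rely on. A torch.Generator seeded identically yields identical samples, so two
# pipeline calls can be compared elementwise.
import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))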
| 467
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 719
|
from scipy.stats import pearsonr
import datasets
lowercase : Any = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowercase : int = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowercase : int = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 392
| 0
|
def solution(length: int = 50) -> int:
    """Count the ways coloured tiles of length 2, 3 or 4 can replace black squares
    in a row of `length` squares, one colour at a time and at least one tile used."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f"""{solution() = }""")
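# --- Editor's aside: an alternative, hedged sanity check of the same count. For a single
# tile length m, the number of rows of length n made of unit squares and length-m tiles
# satisfies f(n) = f(n-1) + f(n-m); each colour contributes f(n) - 1 (dropping the
# all-squares row). `solution_alt` is an illustrative name, not part of the original file.
from functools import lru_cache


def solution_alt(length: int = 50) -> int:
    @lru_cache(maxsize=None)
    def fillings(n: int, m: int) -> int:
        return 1 if n < m else fillings(n - 1, m) + fillings(n - m, m)

    return sum(fillings(length, m) - 1 for m in (2, 3, 4))


assert solution_alt(5) == 12  # 7 red + 3 green + 2 blue, per the Project Euler statement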
| 73
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 649
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without having to submit to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
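# --- Editor's aside: a hedged, standalone sketch of the cache-or-compute pattern
# prepare_data() uses above. Expensive feature tensors are serialized once with torch.save
# and reloaded on later runs; `load_or_build` is an illustrative name.
import os

import torch


def load_or_build(cache_path: str, build_fn):
    if os.path.exists(cache_path):
        return torch.load(cache_path)
    features = build_fn()
    torch.save(features, cache_path)
    return features


# features = load_or_build("cached_train.pt", lambda: expensive_tokenization())  # illustrative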
| 711
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.0_1),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class __lowercase( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
__lowerCamelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCamelCase : List[str] = c.n_embd + 1 # int
__lowerCamelCase : Dict = c.resid_pdrop + 1.0 # float
__lowerCamelCase : int = not c.scale_attn_weights # bool
__lowerCamelCase : Optional[int] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__a , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__a , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__a , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__a , c.summary_type , 'mismatch for key: summary_type' )
def snake_case_ ( self ):
__lowerCamelCase : Tuple = PretrainedConfig()
__lowerCamelCase : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__a , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCamelCase : int = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )]
if len(__a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {", ".join(__a )}.''' )
def snake_case_ ( self ):
with self.assertRaises(__a ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__a )
def snake_case_ ( self ):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : List[str] = mock.Mock()
__lowerCamelCase : Tuple = 500
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[Any] = HTTPError
__lowerCamelCase : str = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__a ) as mock_head:
__lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCamelCase : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__a )
__lowerCamelCase : Optional[int] = 2
json.dump(configuration.to_dict() , open(os.path.join(__a , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase : Any = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase : Any = ['config.42.0.0.json']
__lowerCamelCase : Tuple = 768
configuration.save_pretrained(__a )
shutil.move(os.path.join(__a , 'config.4.0.0.json' ) , os.path.join(__a , 'config.42.0.0.json' ) )
__lowerCamelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 768 )
def snake_case_ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCamelCase : List[str] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCamelCase : Tuple = 'v4.0.0'
__lowerCamelCase , __lowerCamelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__a , return_unused_kwargs=__a )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__a , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCamelCase : Union[str, Any] = 'v3.0.0'
__lowerCamelCase : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(__a )
self.assertEqual(old_configuration.hidden_size , 768 )
| 263
| 0
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Optional[int] ="""pt"""
elif is_tf_available():
A_ : int ="""tf"""
else:
A_ : Tuple ="""jax"""
class __a ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Dict = ByTaTokenizer
SCREAMING_SNAKE_CASE__ : List[str] = False
def snake_case_ ( self ):
super().setUp()
_lowerCamelCase = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case_ ( self ):
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def snake_case_ ( self , **a__ ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def snake_case_ ( self , a__ , a__=False , a__=20 , a__=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowerCamelCase = []
for i in range(len(a__ ) ):
try:
_lowerCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCamelCase = list(filter(lambda a__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , a__ ) )
_lowerCamelCase = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCamelCase = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCamelCase = [t[0] for t in toks]
# Ensure consistency
_lowerCamelCase = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCamelCase = ' ' + output_txt
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowerCamelCase = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = 'Unicode €.'
_lowerCamelCase = tokenizer(a__ )
_lowerCamelCase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , a__ )
# decoding
_lowerCamelCase = tokenizer.decode(a__ )
self.assertEqual(a__ , 'Unicode €.</s>' )
_lowerCamelCase = tokenizer('e è é ê ë' )
_lowerCamelCase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , a__ )
# decoding
_lowerCamelCase = tokenizer.decode(a__ )
self.assertEqual(a__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowerCamelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
_lowerCamelCase = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
_lowerCamelCase = list(batch.input_ids.numpy()[0] )
else:
_lowerCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCamelCase = tokenizer(a__ , padding=a__ , return_tensors=a__ )
        # check that input_ids are returned and that there are no decoder_input_ids
self.assertIn('input_ids' , a__ )
self.assertIn('attention_mask' , a__ )
self.assertNotIn('decoder_input_ids' , a__ )
self.assertNotIn('decoder_attention_mask' , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = [
'Summary of the text.',
'Another summary.',
]
_lowerCamelCase = tokenizer(
text_target=a__ , max_length=32 , padding='max_length' , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization. </s>']
_lowerCamelCase = ['Summary of the text. </s>']
# fmt: off
_lowerCamelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
_lowerCamelCase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
_lowerCamelCase = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch['input_ids'][0] )
self.assertEqual(a__ , batch['labels'][0] )
def snake_case_ ( self ):
# safety check on max_len default value so we are sure the test works
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = ' He is very happy, UNwant\u00E9d,running'
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ )
_lowerCamelCase = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
_lowerCamelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowerCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ )
_lowerCamelCase = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def snake_case_ ( self ):
_lowerCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase = json.load(a__ )
with open(os.path.join(a__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase = json.load(a__ )
_lowerCamelCase = [F'<extra_id_{i}>' for i in range(1_25 )]
_lowerCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowerCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(a__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCamelCase = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCamelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=a__ )]
_lowerCamelCase = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def snake_case_ ( self ):
_lowerCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
_lowerCamelCase = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept one-character strings
# and special added tokens as tokens
_lowerCamelCase = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_lowerCamelCase = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowerCamelCase = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_lowerCamelCase = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowerCamelCase = 0
_lowerCamelCase = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + '_id' , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + '_id' ) , a__ )
setattr(a__ , attr + '_id' , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + '_id' ) , a__ )
setattr(a__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens_ids' ) , [] )
setattr(a__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 650
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
A_ : Union[str, Any] ={
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def SCREAMING_SNAKE_CASE_ ( snake_case : str = "dhaka" , snake_case : int = 5 )-> int:
_lowerCamelCase = min(snake_case , 50 ) # Prevent abuse!
_lowerCamelCase = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
_lowerCamelCase = requests.get('https://www.google.com/search' , params=snake_case , headers=snake_case )
_lowerCamelCase = BeautifulSoup(html.text , 'html.parser' )
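    # Pull the inline <script> payloads that embed the image grid metadata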
_lowerCamelCase = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
_lowerCamelCase = json.dumps(snake_case )
_lowerCamelCase = json.loads(snake_case )
_lowerCamelCase = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , snake_case , )
if not matched_google_image_data:
return 0
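    # Strip out the low-resolution thumbnail URLs so only full-resolution entries remain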
_lowerCamelCase = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(snake_case ) , )
_lowerCamelCase = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , snake_case , )
for index, fixed_full_res_image in enumerate(snake_case ):
if index >= max_images:
return index
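        # The URLs are stored with unicode escape sequences, so decode them back to plain text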
_lowerCamelCase = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
_lowerCamelCase = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
_lowerCamelCase = urllib.request.build_opener()
_lowerCamelCase = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(snake_case )
_lowerCamelCase = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
urllib.request.urlretrieve( # noqa: S310
snake_case , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
A_ : Any =download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print("""Please provide a search term.""")
raise
| 650
| 1
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {"""vocab_file""": """vocab.json"""}
__lowerCamelCase : List[str] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
__lowerCamelCase : Any = {"""mgp-str""": 27}
class A__ ( __snake_case ):
_UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A_ , A_="[GO]" , A_="[GO]" , A_="[s]" , A_="[GO]" , **A_ ):
'''simple docstring'''
super().__init__(
unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ , )
with open(A_ , encoding="utf-8" ) as vocab_handle:
UpperCamelCase : List[str] = json.load(A_ )
UpperCamelCase : Tuple = {v: k for k, v in self.vocab.items()}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return len(self.vocab )
def __UpperCamelCase( self ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
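        # Character-level tokenization: every character of the input text becomes its own token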
UpperCamelCase : Any = []
for s in text:
char_tokens.extend(A_ )
return char_tokens
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.decoder.get(A_ )
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error("Vocabulary path ({}) should be a directory".format(A_ ) )
return
UpperCamelCase : Optional[int] = os.path.join(
A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(A_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + "\n" )
return (vocab_file,)
| 38
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
UpperCamelCase : Optional[int] = spectrogram_length
UpperCamelCase : Dict = num_channels
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : str = feature_size // self.patch_size[1]
UpperCamelCase : List[str] = n_fft
UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase : Optional[int] = sampling_rate
UpperCamelCase : int = padding_value
UpperCamelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
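        # Compute a log-mel spectrogram in dB, drop the last frame and rescale to roughly [-1, 1]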
UpperCamelCase : Union[str, Any] = spectrogram(
A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
UpperCamelCase : List[Any] = log_spec[:, :-1]
UpperCamelCase : Optional[int] = log_spec - 20.0
UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase : List[str] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
UpperCamelCase : Union[str, Any] = audio_features[i]
UpperCamelCase : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
UpperCamelCase : int = {"audio_values": padded_audio_features}
UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
| 38
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Optional[Any] = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = '''realm'''
def __init__( self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=1_28 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=8 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=2_56 , lowerCAmelCase__=10 , lowerCAmelCase__=1E-3 , lowerCAmelCase__=5 , lowerCAmelCase__=3_20 , lowerCAmelCase__=13_35_37_18 , lowerCAmelCase__=50_00 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
# Common config
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = retriever_proj_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_candidates
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
# Reader config
__lowercase = span_hidden_size
__lowercase = max_span_width
__lowercase = reader_layer_norm_eps
__lowercase = reader_beam_size
__lowercase = reader_seq_len
# Retrieval config
__lowercase = num_block_records
__lowercase = searcher_beam_size
| 534
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a : Dict = logging.get_logger(__name__)
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__lowercase = [144, 192, 240]
__lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__lowercase = [96, 120, 144]
__lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__lowercase = [64, 80, 96]
__lowercase = [16, 16, 24, 48, 64, 80, 320]
__lowercase = 0.05
__lowercase = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
__lowercase = 512
__lowercase = 16
__lowercase = 21
__lowercase = '''pascal-voc-id2label.json'''
else:
__lowercase = 1000
__lowercase = '''imagenet-1k-id2label.json'''
__lowercase = '''huggingface/label-files'''
__lowercase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) )
__lowercase = {int(lowercase ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( lowercase , lowercase=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
__lowercase = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
__lowercase = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
__lowercase = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
__lowercase = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
__lowercase = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
__lowercase = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
__lowercase = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
__lowercase = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
__lowercase = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
__lowercase = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
__lowercase = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
__lowercase = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
__lowercase = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
__lowercase = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
__lowercase = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
__lowercase = name.replace(F".global_rep.{i}.weight" , '''.layernorm.weight''' )
if F".global_rep.{i}.bias" in name:
__lowercase = name.replace(F".global_rep.{i}.bias" , '''.layernorm.bias''' )
if ".global_rep." in name:
__lowercase = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
__lowercase = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
__lowercase = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
__lowercase = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
__lowercase = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
__lowercase = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
__lowercase = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
__lowercase = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
__lowercase = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
__lowercase = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
__lowercase = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
__lowercase = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
__lowercase = '''mobilevit.''' + name
return name
def UpperCAmelCase ( lowercase , lowercase , lowercase=False ):
"""simple docstring"""
if base_model:
__lowercase = ''''''
else:
__lowercase = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
__lowercase = orig_state_dict.pop(lowercase )
if key[:8] == "encoder.":
__lowercase = key[8:]
if "qkv" in key:
__lowercase = key.split('''.''' )
__lowercase = int(key_split[0][6:] ) - 1
__lowercase = int(key_split[3] )
__lowercase = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
__lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__lowercase = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
__lowercase = val[:dim, :]
__lowercase = val[dim : dim * 2, :]
__lowercase = val[-dim:, :]
else:
__lowercase = val[:dim]
__lowercase = val[dim : dim * 2]
__lowercase = val[-dim:]
else:
__lowercase = val
return orig_state_dict
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowercase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase=False ):
"""simple docstring"""
__lowercase = get_mobilevit_config(lowercase )
# load original state_dict
__lowercase = torch.load(lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
__lowercase = MobileViTForSemanticSegmentation(lowercase ).eval()
else:
__lowercase = MobileViTForImageClassification(lowercase ).eval()
__lowercase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__lowercase = image_processor(images=prepare_img() , return_tensors='''pt''' )
__lowercase = model(**lowercase )
__lowercase = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__lowercase = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__lowercase = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__lowercase = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__lowercase = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__lowercase = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__lowercase = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , lowercase , atol=1E-4 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase )
if push_to_hub:
__lowercase = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
__lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase , organization='''apple''' )
model.push_to_hub(lowercase , organization='''apple''' )
if __name__ == "__main__":
__a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__a : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 534
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def _snake_case ( snake_case__ : int ):
if num <= 0:
A = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(snake_case__ )
A = [True] * (num + 1)
A = []
A = 2
A = int(math.sqrt(snake_case__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(snake_case__ )
            # Set multiples of start to False
for i in range(start * start , num + 1 , snake_case__ ):
if sieve[i] is True:
A = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(snake_case__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 22
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
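    # Parse the timm checkpoint name to recover the model size, image size and window size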
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 22
| 1
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase_ = logging.get_logger(__name__)
class A__ ( _A ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Dict:
"""simple docstring"""
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 154
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , __snake_case : Tuple , __snake_case : Tuple=7 , __snake_case : int=3 , __snake_case : List[str]=30 , __snake_case : Optional[Any]=4_00 , __snake_case : Dict=True , __snake_case : Optional[Any]=None , __snake_case : Dict=True , __snake_case : Any=[0.5, 0.5, 0.5] , __snake_case : Tuple=[0.5, 0.5, 0.5] , __snake_case : Any=True , __snake_case : str=1 / 2_55 , __snake_case : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
def lowerCamelCase_ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self : int , __snake_case : Union[str, Any] , __snake_case : List[Any]=False ):
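        # Compute the expected (height, width) after shortest-edge resizing, per image or over the whole batch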
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(__snake_case , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = self.size['''shortest_edge''']
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(__snake_case , key=lambda __snake_case : item[0] )[0]
UpperCAmelCase_ = max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Any = DetaImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = DetaImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_rescale''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __snake_case )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self : Tuple ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
# prepare image and target
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
UpperCAmelCase_ = DetaImageProcessor()
UpperCAmelCase_ = image_processing(images=__snake_case , annotations=__snake_case , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify orig_size
UpperCAmelCase_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
UpperCAmelCase_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
@slow
def lowerCamelCase_ ( self : int ):
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
UpperCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase_ = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase_ = image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __snake_case ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __snake_case )
UpperCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __snake_case , atol=1E-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __snake_case ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __snake_case ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __snake_case ) )
# verify masks
UpperCAmelCase_ = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __snake_case )
# verify orig_size
UpperCAmelCase_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __snake_case ) )
# verify size
UpperCAmelCase_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __snake_case ) )
| 144
| 0
|
from __future__ import annotations
__a = list[list[int]]
# assigning initial values to the grid
__a = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__a = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
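    # A digit n is safe if it does not already appear in the row, the column, or the 3x3 box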
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if location := find_empty_location(_lowercase ):
UpperCAmelCase_, UpperCAmelCase_ : Tuple = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_lowercase , _lowercase , _lowercase , _lowercase ):
UpperCAmelCase_ : List[str] = digit
if sudoku(_lowercase ) is not None:
return grid
UpperCAmelCase_ : List[Any] = 0
return None
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
for row in grid:
for cell in row:
print(_lowercase , end=''' ''' )
print()
if __name__ == "__main__":
    # try to solve each example grid and print the solution when one exists
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__a = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 300
|
from __future__ import annotations
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
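    # Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] into one sorted run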
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_, UpperCAmelCase_ : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
UpperCAmelCase_ : Optional[Any] = result + left + right
return input_list
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if len(_lowercase ) <= 1:
return input_list
UpperCAmelCase_ : List[Any] = list(_lowercase )
# iteration for two-way merging
UpperCAmelCase_ : Dict = 2
while p <= len(_lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(_lowercase ) , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = i
UpperCAmelCase_ : Union[str, Any] = i + p - 1
UpperCAmelCase_ : Optional[int] = (low + high + 1) // 2
UpperCAmelCase_ : Tuple = merge(_lowercase , _lowercase , _lowercase , _lowercase )
# final merge of last two parts
if p * 2 >= len(_lowercase ):
UpperCAmelCase_ : List[Any] = i
UpperCAmelCase_ : Tuple = merge(_lowercase , 0 , _lowercase , len(_lowercase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__a = []
else:
__a = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 300
| 1
|