code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Flags selecting which conversion step to run on the checkpoint repo.
# NOTE(review): the dump clobbered every assignment target; names below are
# reconstructed from where each value is read later in the script.
do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }
    # Old state-dict top-level prefix -> new submodule prefix.
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    # Repos that keep config.json at the root hold a plain UNet; otherwise it
    # lives in the "unet" subfolder of a pipeline repo.
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                # Move the value to its renamed key.
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Downsampler conv params were folded elsewhere; drop them.
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # NOTE(review): rename rule reconstructed from the upstream
                    # diffusers conversion script — TODO confirm against it.
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Return Easter Sunday of Gregorian *year* via Gauss's Easter algorithm.

    https://en.wikipedia.org/wiki/Date_of_Easter#Gauss's_Easter_algorithm
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    # BUG FIX: Gauss's algorithm uses floor(k / 4); true division leaves a
    # fractional part that shifts the result (e.g. 1994 came out March 28
    # instead of the correct April 3).
    leap_day_reinstall_number = leap_day_inhibits // 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two historical exceptions of the Gregorian computus.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    # Print past/future Easter dates for a few sample years.
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 630 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): constant names reconstructed from the reads inside the
# tokenizer class below (VOCAB_FILES_NAMES, PRETRAINED_*, FAIRSEQ_LANGUAGE_CODES).
logger = logging.get_logger(__name__)

# Marker sentencepiece uses for a leading-space piece.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on
class UpperCamelCase__ ( PreTrainedTokenizer ):
    """M2M100-style tokenizer: a SentencePiece model plus a JSON vocab,
    extended with fairseq ``__<lang>__`` language-code tokens.

    NOTE(review): attribute, method and parameter names below are
    reconstructed from the reads in this dump (the obfuscation clobbered the
    assignment targets and collapsed every method name to ``a``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    # Filled by set_src_lang_special_tokens() / set_tgt_lang_special_tokens().
    prefix_tokens = []
    suffix_tokens = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs=None,
        num_madeup_words=8,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        # Register every language token that is not already a special token.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        # Language tokens are appended after the base vocabulary.
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self):
        """Base vocabulary plus the appended language tokens."""
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self):
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index):
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Decode sentencepiece pieces back to text, passing special tokens through."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePieceProcessor is not picklable; rebuilt in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            # No file on disk: serialize the in-memory model instead.
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seqaseq_batch(self, src_texts, src_lang="en", tgt_texts=None, tgt_lang="ro", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        # NOTE(review): target key reconstructed from the upstream tokenizer —
        # the dump clobbered this assignment target; confirm against it.
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang):
        """Prefix = [src-lang id], suffix = [eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang):
        """Prefix = [tgt-lang id], suffix = [eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang):
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang):
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path, sp_model_kwargs):
    """Build a SentencePiece processor from *sp_model_kwargs* and load the
    model file at *path*.

    NOTE(review): the dump declared two parameters both named ``_A`` (a
    SyntaxError); names restored from the call sites, which pass
    ``(spm_file, self.sp_model_kwargs)``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path):
    """Read the JSON file at *path* and return the parsed object.

    BUG FIX: the original passed the path string to ``json.load`` instead of
    the open file handle, which raises ``AttributeError`` at runtime.
    """
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path):
    """Write *data* to *path* as indented JSON.

    NOTE(review): the dump declared two parameters both named ``_A`` (a
    SyntaxError); order restored from the call site ``save_json(self.encoder,
    vocab_save_path)``.
    """
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : Dict = 100
self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(
snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 630 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds tiny DPT-hybrid configs and dummy inputs for the model tests.

    NOTE(review): the dump assigned every constructor argument to a clobbered
    temporary and collapsed all method names to ``a``; the ``self.*`` targets
    and method names below are reconstructed from the reads in this file
    (e.g. ``DPTModelTester(self)`` and ``self.model_tester.prepare_config_and_inputs()``).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common model tests for DPT.

    NOTE(review): base classes, class attributes and test names are
    reconstructed — the dump replaced them with ``SCREAMING_SNAKE_CASE_`` /
    ``__magic_name__`` / ``a`` placeholders, which made every earlier test
    method unreachable.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    # NOTE(review): the original name of this skipped test is not recoverable
    # from the dump; any distinct name restores discoverability.
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # "add" is invalid for the hybrid variant, so construction must fail.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    (Name restored from the call site ``prepare_img()`` below; the dump's
    ``lowercase`` def also bound its result to a clobbered temporary.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: DPT-hybrid depth estimation on a fixture image."""

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 718 |
'''simple docstring'''
def method_a(boundary, steps):
    """Composite trapezoidal rule for f over [boundary[0], boundary[1]]
    with *steps* sub-intervals.

    NOTE(review): names restored from the reads in the dump (``method_a``,
    ``make_points``, ``f``); the duplicate ``_A`` parameters were a
    SyntaxError as written.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str="", ):
    """True iff some permutation of *input_str* (spaces ignored,
    case-insensitive) is a palindrome — i.e. at most one character has an
    odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str=""):
    """Same check as above, with a hand-rolled frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str=""):
    """Print the answer and timeit timing of both implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR sub-package (standard transformers
# boilerplate; clobbered assignment targets restored).
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch models are only exported when torch is installed.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
# Public API of the features sub-package (the dump bound this list to a
# clobbered temporary; it is the package's __all__ declaration).
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio

# NOTE(review): the dump imported four identical names ("ArrayaD"); restored
# to the Array2D..Array5D names declared in __all__ above.
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of ``input_str`` is a palindrome.

    Spaces are ignored and the check is case-insensitive.  A string can be
    rearranged into a palindrome iff at most one character occurs an odd
    number of times.  (Renamed from the shadowing ``lowercase`` so that the
    benchmark and ``__main__`` call sites resolve.)
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Return True if some permutation of ``input_str`` is a palindrome.

    Counter-free variant of the check above: spaces ignored,
    case-insensitive; at most one character may have an odd frequency.
    (Renamed from the shadowing ``lowercase`` so call sites resolve.)
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        # More than one odd count -> no palindrome permutation exists.
        if odd_char > 1:
            return False
    return True
def benchmark(input_str: str = "") -> None:
    """Benchmark both palindrome-permutation checks on the given string.

    NOTE: timeit's setup string imports ``__main__`` as ``z`` and reads
    ``z.check_str``, so callers must first bind the module-level variable
    ``check_str``.  (Renamed from the third shadowing ``lowercase`` def,
    which also made its two callees unresolvable.)
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    # Must be bound to the module-level name `check_str`: benchmark()'s timeit
    # setup ("import __main__ as z") reads z.check_str. The obfuscated
    # `lowerCAmelCase` binding left `check_str` undefined at the calls below.
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 630 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# The names below are read by the tokenizer class attributes
# (vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes).
# Previously all four bindings shared one obfuscated name, shadowing each
# other and leaving these identifiers undefined.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """SentencePiece-based tokenizer for the GPT-SW3 models.

    Fixes applied: the four class attributes were all assigned to one
    obfuscated name (shadowing each other instead of declaring
    ``vocab_files_names`` etc.); ``__init__`` declared every parameter as
    ``snake_case__`` (duplicate argument names — a SyntaxError) and assigned
    configuration to a throwaway local instead of ``self``; and most methods
    were named ``a``, so only the last one survived on the class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE(review): this set originally contains various Unicode space /
        # zero-width characters that may have been mangled in transit —
        # confirm against upstream before shipping.
        # fmt : off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]'
        )

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; drop it from the state.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace, NFC-normalize."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        """Tokenize preprocessed text into SentencePiece string pieces."""
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Return the input unchanged: overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join pieces back into text, decoding special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: persist the in-memory serialized model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Fast encode path that skips the slow tokenizer machinery."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Fast decode path straight through SentencePiece."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build the "User:/Bot:" chat prompt and encode it."""
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt)
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint -> config-URL map. Previously both bindings shared one
# obfuscated name, so the dict assignment clobbered the logger.
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """Configuration class for data2vec-text models.

    Stores the hyper-parameters used to instantiate the model; defaults
    mirror facebook/data2vec-text-base.  Fixes applied: the obfuscated
    ``__init__`` declared every parameter as ``snake_case__`` (duplicate
    argument names — a SyntaxError) and bound the values to a throwaway
    local instead of ``self``, so no configuration was ever stored.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """ONNX export configuration for data2vec-text."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names keyed by model input name.

        Renamed from the meaningless ``a``: OnnxConfig consumers read the
        ``inputs`` property.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 630 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# The three constants below are read as `english_letter_freq`, `ETAOIN` and
# `LETTERS` by the functions in this module; the obfuscated bindings shadowed
# one another and left those names undefined.
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter A-Z in ``message``.

    The count is case-insensitive; non-letters are ignored.  Uses
    ``string.ascii_uppercase`` (identical to the module's ``LETTERS``
    constant) so the function is self-contained.  Renamed from the
    shadowing ``lowercase`` so the call in get_frequency_order resolves.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in string.ascii_uppercase:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    """sort() key helper: return the first element of a (freq, letters) pair."""
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in ``message``.

    Ties within a frequency bucket are broken by reverse ETAOIN order
    (the classic cryptanalysis recipe).  Fixes applied: the obfuscated
    version passed the message itself as ``sort``'s ``key``/``reverse``
    arguments and never populated ``freq_to_letter_str``.
    """
    letter_to_freq = get_letter_count(message)
    # Bucket letters by their frequency in the message.
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # Within each bucket, order letters by reverse ETAOIN rank, then join.
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    # Highest frequency buckets first.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)
def english_freq_match_score(message: str) -> int:
    """Score 0-12: how English-like ``message``'s letter frequencies are.

    One point for each of the 6 most frequent letters of the message that is
    also among English's 6 most frequent (ETAOIN), plus one for each of the
    6 least frequent that matches English's 6 least frequent.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 700 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    """Pytest hook: mark every test lacking `integration`/`unit` markers as a unit test.

    Must use this exact hook name for pytest to invoke it; the obfuscated
    name ``lowercase`` was never called.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    """Pytest hook: register the custom `torchaudio_latest` marker.

    Keeps `--strict-markers` runs from failing on the unknown marker; the
    hook must carry this exact name for pytest to call it.
    """
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Autouse fixture: redirect all datasets cache locations into a
    per-session temporary directory so tests never touch the real HF cache.

    Fixes applied: `autouse=_A` referenced an undefined module-level name
    (the intended value is True) and the fixture shared the shadowing
    name ``lowercase`` with five other functions.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Autouse session fixture: silence datasets' tqdm progress bars.

    `autouse=_A` previously referenced an undefined name; True is intended.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Autouse fixture: don't ping the HF Hub download counter from tests.

    Both the `autouse` flag and the patched value were the undefined `_A`;
    the intended values are True and False respectively.
    """
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Opt-in fixture: silence SQLAlchemy 2.0's deprecation banner.

    The patched value was the undefined `_A`; True is intended.
    """
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 630 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint -> config-URL map. Previously both bindings shared one
# obfuscated name, so the dict assignment clobbered the logger.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig(SCREAMING_SNAKE_CASE_):
    """Configuration for the Pix2Struct text (decoder) model.

    Renamed from the thrice-reused ``UpperCamelCase__`` to the name the
    composite config below instantiates.  Fixes applied: duplicate
    ``snake_case__`` parameters (a SyntaxError), attributes never stored on
    ``self``, and the tuple-unpack of ``get_config_dict`` collapsed to a
    single binding.
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text config, unwrapping a composite pix2struct config if given."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(SCREAMING_SNAKE_CASE_):
    """Configuration for the Pix2Struct vision (encoder) model.

    Renamed from the reused ``UpperCamelCase__`` to the name the composite
    config below instantiates; duplicate parameter names and the dropped
    ``self.`` assignments are restored as in the text config above.
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping a composite pix2struct config if given."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class UpperCamelCase__(SCREAMING_SNAKE_CASE_):
    """Composite Pix2Struct configuration wrapping a text and a vision config.

    Fixes applied: duplicate ``snake_case__`` parameters (SyntaxError) and
    the ``self.`` assignments that the obfuscation collapsed into throwaway
    locals — in particular the propagation of ``initializer_range`` into the
    two sub-configs was lost entirely.
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Token ids are owned by the text (decoder) sub-config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Keep the sub-configs' init ranges consistent with the composite one.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# Docstring constants (transformers' conventional `_*_FOR_DOC` names).
# Previously all six bindings shared one obfuscated name, shadowing each other.
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D + BatchNorm + activation block used throughout TF RegNet.

    Renamed to the identifier the other layers in this file instantiate;
    duplicate parameter names, `tf.keras.layers.ConvaD`/`ZeroPaddingaD`
    typos, and the undefined `use_bias=snake_case__` are fixed.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided conv applied to NCHW pixel values."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + BatchNorm projection matching residual channels/stride."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation channel attention (pool -> 2 convs -> scale)."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # Squeeze to (batch, 1, 1, channels), excite, then rescale the input.
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X layer: ResNet-style bottleneck with a grouped 3x3 conv."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Final 1x1 conv is linear; the residual sum is activated below.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y layer: the X layer plus a squeeze-and-excitation block."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # Final 1x1 conv is linear; the residual sum is activated below.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X/Y layers; the first may downsample."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=F'layers.{i+1}') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'stages.{i+1}'))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Core RegNet computation: embedder -> encoder -> global average pooler.

    NOTE(review): obfuscated source — the forward method repeats the parameter
    name ``snake_case__`` (SyntaxError as written) and references names that
    are never bound here (``output_hidden_states``, ``return_dict``,
    ``encoder_outputs``, ``last_hidden_state``, ``pooled_output``,
    ``hidden_states``). Verify against the upstream TFRegNetMainLayer.
    """
    __magic_name__ = RegNetConfig
    def __init__( self , snake_case__ , **snake_case__ ):
        '''Create the embedder, encoder and keepdims average-pooling sub-layers.'''
        super().__init__(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = config
        _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' )
        _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' )
        _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
    @unpack_inputs
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
        '''Forward pass returning the pooled output and (optionally) all hidden states.'''
        _lowerCAmelCase : Tuple = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ )
        _lowerCAmelCase : List[str] = self.encoder(
            snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
        _lowerCAmelCase : List[Any] = encoder_outputs[0]
        _lowerCAmelCase : Tuple = self.pooler(snake_case__ )
        # Transpose outputs to NCHW so all modules expose a uniform layout.
        _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
        _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Abstract base wiring RegNet into the pretrained-model machinery.

    NOTE(review): the three assignments below all target ``__magic_name__``
    (obfuscation artifact) — as written only the last one survives.
    """
    __magic_name__ = RegNetConfig
    __magic_name__ = "regnet"
    __magic_name__ = "pixel_values"
    @property
    def a ( self ):
        '''Input signature used for model building/serving: a batch of 224x224 images.'''
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : List[Any] = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : Dict = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Bare TF RegNet model: returns raw hidden states with no task head.

    NOTE(review): obfuscated source — the forward method references ``outputs``
    / ``output_hidden_states`` / ``return_dict`` which are never bound under
    these names; confirm against the upstream TFRegNetModel.
    """
    def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Wrap the main RegNet layer under the canonical name "regnet".'''
        super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
        _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
        '''Forward pass delegating to the main layer; tuple or ModelOutput return.'''
        _lowerCAmelCase : Optional[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : str = self.regnet(
            pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """RegNet with a linear image-classification head over the pooled features.

    NOTE(review): obfuscated source — the forward method references ``outputs``
    / ``labels`` / ``logits`` / ``loss`` which are never bound under these
    names; confirm against the upstream TFRegNetForImageClassification.
    """
    def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Create the backbone and the flatten + dense classification head.'''
        super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
        _lowerCAmelCase : Optional[Any] = config.num_labels
        _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' )
        # classification head
        _lowerCAmelCase : Optional[int] = [
            tf.keras.layers.Flatten(),
            # Identity when num_labels == 0 (feature extraction only).
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(snake_case__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ):
        '''Forward pass; computes classification logits and, when labels are given, the loss.'''
        _lowerCAmelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        _lowerCAmelCase : Dict = self.regnet(
            snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
        _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
        _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ )
        _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ )
        _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ )
        if not return_dict:
            _lowerCAmelCase : str = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
| 630 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase__ :
    """Helper that builds small DistilBert configs/inputs and checks model output shapes.

    NOTE(review): obfuscated source — several method signatures below repeat
    the parameter name ``snake_case__`` (a SyntaxError as written), and bodies
    reference names (``parent``, ``input_ids``, ``input_mask``, ``model``,
    ``result``, ``config_and_inputs``) that are only meaningful in the
    de-obfuscated upstream tester.
    """
    def __init__( self , snake_case__ , ):
        '''Store the parent test case and the fixed miniature hyper-parameters.'''
        _lowerCAmelCase : Optional[int] = parent
        _lowerCAmelCase : Any = 13
        _lowerCAmelCase : Dict = 7
        _lowerCAmelCase : Any = True
        _lowerCAmelCase : Union[str, Any] = True
        _lowerCAmelCase : Union[str, Any] = False
        _lowerCAmelCase : List[Any] = True
        _lowerCAmelCase : int = 99
        _lowerCAmelCase : int = 32
        _lowerCAmelCase : List[Any] = 2
        _lowerCAmelCase : List[Any] = 4
        _lowerCAmelCase : Dict = 37
        _lowerCAmelCase : str = 'gelu'
        _lowerCAmelCase : str = 0.1
        _lowerCAmelCase : Any = 0.1
        _lowerCAmelCase : Union[str, Any] = 512
        _lowerCAmelCase : int = 16
        _lowerCAmelCase : List[Any] = 2
        _lowerCAmelCase : Optional[int] = 0.02
        _lowerCAmelCase : int = 3
        _lowerCAmelCase : Optional[int] = 4
        _lowerCAmelCase : str = None
    def a ( self ):
        '''Create random ids/masks/labels and the matching DistilBertConfig.'''
        _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCAmelCase : List[Any] = None
        if self.use_input_mask:
            _lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCAmelCase : List[Any] = None
        _lowerCAmelCase : Union[str, Any] = None
        _lowerCAmelCase : str = None
        if self.use_labels:
            _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCAmelCase : str = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the base model's last hidden state shape for dict and list inputs.'''
        _lowerCAmelCase : Optional[int] = TFDistilBertModel(config=snake_case__ )
        _lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        _lowerCAmelCase : List[str] = model(snake_case__ )
        _lowerCAmelCase : List[str] = [input_ids, input_mask]
        _lowerCAmelCase : Tuple = model(snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the masked-LM head's logits shape (vocab-sized last dim).'''
        _lowerCAmelCase : Tuple = TFDistilBertForMaskedLM(config=snake_case__ )
        _lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        _lowerCAmelCase : List[Any] = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the QA head's start/end logits shapes.'''
        _lowerCAmelCase : int = TFDistilBertForQuestionAnswering(config=snake_case__ )
        _lowerCAmelCase : Tuple = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        _lowerCAmelCase : Union[str, Any] = model(snake_case__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the sequence-classification head's logits shape.'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : List[str] = TFDistilBertForSequenceClassification(snake_case__ )
        _lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
        _lowerCAmelCase : Any = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the multiple-choice head's logits shape after tiling inputs per choice.'''
        _lowerCAmelCase : Optional[Any] = self.num_choices
        _lowerCAmelCase : Optional[Any] = TFDistilBertForMultipleChoice(snake_case__ )
        _lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        _lowerCAmelCase : List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
        _lowerCAmelCase : List[str] = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        _lowerCAmelCase : int = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
        '''Check the token-classification head's logits shape (one label set per token).'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Dict = TFDistilBertForTokenClassification(snake_case__ )
        _lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        _lowerCAmelCase : List[str] = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def a ( self ):
        '''Package config and inputs into the dict form the common tests expect.'''
        _lowerCAmelCase : Any = self.prepare_config_and_inputs()
        (_lowerCAmelCase) : str = config_and_inputs
        _lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """TF DistilBert test suite driving the shared model/pipeline test mixins.

    NOTE(review): all class attributes below are assigned to the same
    obfuscated name ``__magic_name__`` — as written only the last assignment
    survives; upstream these are distinct attributes (all_model_classes,
    pipeline_model_mapping, test_head_masking, test_onnx).
    """
    __magic_name__ = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    __magic_name__ = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    def a ( self ):
        '''Instantiate the shared model tester and the config tester.'''
        _lowerCAmelCase : Dict = TFDistilBertModelTester(self )
        _lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , dim=37 )
    def a ( self ):
        '''Run the common configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def a ( self ):
        '''Exercise the base model shape check.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*snake_case__ )
    def a ( self ):
        '''Exercise the masked-LM head check.'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case__ )
    def a ( self ):
        '''Exercise the question-answering head check.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case__ )
    def a ( self ):
        '''Exercise the sequence-classification head check.'''
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case__ )
    def a ( self ):
        '''Exercise the multiple-choice head check.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case__ )
    def a ( self ):
        '''Exercise the token-classification head check.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case__ )
    @slow
    def a ( self ):
        '''Smoke-test loading the first pretrained checkpoint from the hub.'''
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            _lowerCAmelCase : Any = TFDistilBertModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration test pinning DistilBert's output against known reference values."""
    @slow
    def a ( self ):
        '''Load distilbert-base-uncased and compare a 3x3 output slice to golden numbers.'''
        _lowerCAmelCase : Optional[int] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
        _lowerCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _lowerCAmelCase : Dict = model(snake_case__ )[0]
        # Expected shape: (batch=1, seq_len=6, hidden=768).
        _lowerCAmelCase : int = [1, 6, 768]
        self.assertEqual(output.shape , snake_case__ )
        # Golden values for the first 3 tokens x first 3 hidden dims.
        _lowerCAmelCase : List[Any] = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1E-4 )
| 702 |
'''simple docstring'''
from typing import Any
def lowercase (_A ):
    """Return the sorted mode(s) of a list.

    Fixes the original body, which referenced two undefined names
    (``input_list`` and ``y``) and therefore raised NameError on every call.

    Args:
        _A: A list of hashable values; may be empty.

    Returns:
        A sorted list of the value(s) that occur most often in ``_A``;
        an empty list when ``_A`` is empty.
    """
    if not _A:
        return []
    # Occurrence count of each element, positionally aligned with _A.
    counts = [_A.count(value) for value in _A]
    max_count = max(counts)  # the highest frequency present
    # Every distinct element whose count equals the maximum is a mode.
    return sorted({_A[i] for i, count in enumerate(counts) if count == max_count})
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 630 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Test suite for the MVP tokenizer (slow and fast variants).

    NOTE(review): obfuscated source — the class attributes below all assign to
    ``__magic_name__`` (only the last survives as written), and method bodies
    reference names (``vocab_tokens``, ``merges``, ``batch``, ``targets``,
    ``inputs``) that only exist in the de-obfuscated upstream test file.
    """
    __magic_name__ = MvpTokenizer
    __magic_name__ = MvpTokenizerFast
    __magic_name__ = True
    __magic_name__ = filter_roberta_detectors
    def a ( self ):
        '''Write a tiny BPE vocab and merges file into a temp dir for the tests.'''
        super().setUp()
        _lowerCAmelCase : List[Any] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        _lowerCAmelCase : Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
        _lowerCAmelCase : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        _lowerCAmelCase : str = {'unk_token': '<unk>'}
        _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(snake_case__ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(snake_case__ ) )
    def a ( self , **snake_case__ ):
        '''Build a slow tokenizer from the temp-dir fixture files.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
    def a ( self , **snake_case__ ):
        '''Build a fast (Rust) tokenizer from the temp-dir fixture files.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
    def a ( self , snake_case__ ):
        '''Return a (input, expected) text pair covered by the tiny vocab.'''
        return "lower newer", "lower newer"
    @cached_property
    def a ( self ):
        '''Slow MVP tokenizer loaded from the hub (cached per test class).'''
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
    @cached_property
    def a ( self ):
        '''Fast MVP tokenizer loaded from the hub (cached per test class).'''
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
    @require_torch
    def a ( self ):
        '''Check batch encoding shape and exact token ids for both tokenizers.'''
        _lowerCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _lowerCAmelCase : Any = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase : Tuple = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' )
            self.assertIsInstance(snake_case__ , snake_case__ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            _lowerCAmelCase : List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(snake_case__ , snake_case__ )
            # Test that special tokens are reset
    @require_torch
    def a ( self ):
        '''Encoding without targets must not produce labels / decoder masks.'''
        _lowerCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' )
            # check if input_ids are returned and no labels
            self.assertIn('input_ids' , snake_case__ )
            self.assertIn('attention_mask' , snake_case__ )
            self.assertNotIn('labels' , snake_case__ )
            self.assertNotIn('decoder_attention_mask' , snake_case__ )
    @require_torch
    def a ( self ):
        '''Target texts padded to max_length must come out exactly 32 wide.'''
        _lowerCAmelCase : Any = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase : Union[str, Any] = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
    @require_torch
    def a ( self ):
        '''Over-long inputs must be truncated to the model max length (1024).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase : List[str] = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
            self.assertIsInstance(snake_case__ , snake_case__ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )
    @require_torch
    def a ( self ):
        '''Inputs and labels must both be wrapped in BOS/EOS special tokens.'''
        _lowerCAmelCase : List[str] = ['A long paragraph for summarization.']
        _lowerCAmelCase : Dict = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , text_target=snake_case__ , return_tensors='pt' )
            _lowerCAmelCase : Union[str, Any] = inputs['input_ids']
            _lowerCAmelCase : List[Any] = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def a ( self ):
        '''Intentionally skipped in this suite.'''
        pass
    def a ( self ):
        '''Slow and fast tokenizers must agree on ids, masks and special tokens.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                _lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                _lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
                _lowerCAmelCase : List[Any] = 'A, <mask> AllenNLP sentence.'
                _lowerCAmelCase : List[str] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
                _lowerCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                _lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                _lowerCAmelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 703 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 630 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor wrapping a SamImageProcessor: preprocesses images and normalizes
    prompt points / labels / boxes to the model's coordinate frame.

    NOTE(review): obfuscated source — several method signatures repeat the
    parameter name ``snake_case__`` (a SyntaxError as written) and bodies
    reference names (``input_points``, ``input_boxes``, ``coords``, ``kwargs``)
    that are only bound in the de-obfuscated upstream SamProcessor.
    """
    __magic_name__ = ["image_processor"]
    __magic_name__ = "SamImageProcessor"
    def __init__( self , snake_case__ ):
        '''Store the image processor, the pad sentinel (-10) and the target edge size.'''
        super().__init__(snake_case__ )
        _lowerCAmelCase : List[str] = self.image_processor
        # Sentinel value used to pad ragged point/label arrays.
        _lowerCAmelCase : Tuple = -10
        _lowerCAmelCase : List[str] = self.image_processor.size['longest_edge']
    def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
        '''Preprocess images, then validate and rescale the prompt inputs.'''
        _lowerCAmelCase : Dict = self.image_processor(
            snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
        # pop arguments that are not used in the forward but used nevertheless
        _lowerCAmelCase : str = encoding_image_processor['original_sizes']
        if hasattr(snake_case__ , 'numpy' ): # Checks if Torch or TF tensor
            _lowerCAmelCase : str = original_sizes.numpy()
        _lowerCAmelCase : Optional[int] = self._check_and_preprocess_points(
            input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , )
        _lowerCAmelCase : List[str] = self._normalize_and_convert(
            snake_case__ , snake_case__ , input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , return_tensors=snake_case__ , )
        return encoding_image_processor
    def a ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="pt" , ):
        '''Rescale prompts to the resized image frame and convert them to tensors.'''
        if input_points is not None:
            if len(snake_case__ ) != len(snake_case__ ):
                # One shared original size for every batch entry.
                _lowerCAmelCase : List[Any] = [
                    self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] ) for point in input_points
                ]
            else:
                # Per-entry original sizes.
                _lowerCAmelCase : Tuple = [
                    self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ )
                    for point, original_size in zip(snake_case__ , snake_case__ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _lowerCAmelCase : Any = self._pad_points_and_labels(snake_case__ , snake_case__ )
            _lowerCAmelCase : List[str] = np.array(snake_case__ )
        if input_labels is not None:
            _lowerCAmelCase : str = np.array(snake_case__ )
        if input_boxes is not None:
            if len(snake_case__ ) != len(snake_case__ ):
                _lowerCAmelCase : Union[str, Any] = [
                    self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] , is_bounding_box=snake_case__ )
                    for box in input_boxes
                ]
            else:
                _lowerCAmelCase : int = [
                    self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ , is_bounding_box=snake_case__ )
                    for box, original_size in zip(snake_case__ , snake_case__ )
                ]
            _lowerCAmelCase : List[Any] = np.array(snake_case__ )
        if input_boxes is not None:
            if return_tensors == "pt":
                _lowerCAmelCase : Tuple = torch.from_numpy(snake_case__ )
                # boxes batch size of 1 by default
                _lowerCAmelCase : str = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _lowerCAmelCase : Tuple = tf.convert_to_tensor(snake_case__ )
                # boxes batch size of 1 by default
                _lowerCAmelCase : int = tf.expand_dims(snake_case__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _lowerCAmelCase : Optional[int] = torch.from_numpy(snake_case__ )
                # point batch size of 1 by default
                _lowerCAmelCase : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _lowerCAmelCase : Optional[int] = tf.convert_to_tensor(snake_case__ )
                # point batch size of 1 by default
                _lowerCAmelCase : Union[str, Any] = tf.expand_dims(snake_case__ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ )
                # point batch size of 1 by default
                _lowerCAmelCase : Optional[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _lowerCAmelCase : List[str] = tf.convert_to_tensor(snake_case__ )
                # point batch size of 1 by default
                _lowerCAmelCase : Optional[Any] = tf.expand_dims(snake_case__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels} )
        return encoding_image_processor
    def a ( self , snake_case__ , snake_case__ ):
        '''Pad ragged per-image point arrays (and labels) up to the largest count.'''
        _lowerCAmelCase : int = max([point.shape[0] for point in input_points] )
        _lowerCAmelCase : str = []
        for i, point in enumerate(snake_case__ ):
            if point.shape[0] != expected_nb_points:
                # Fill missing rows with the pad sentinel so shapes match.
                _lowerCAmelCase : str = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _lowerCAmelCase : Any = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(snake_case__ )
        _lowerCAmelCase : str = processed_input_points
        return input_points, input_labels
    def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
        '''Scale (x, y) coordinates from the original image size to the target size.'''
        _lowerCAmelCase : str = original_size
        _lowerCAmelCase : Tuple = self.image_processor._get_preprocess_shape(snake_case__ , longest_edge=snake_case__ )
        _lowerCAmelCase : str = deepcopy(snake_case__ ).astype(snake_case__ )
        if is_bounding_box:
            # Treat each box as two corner points for scaling.
            _lowerCAmelCase : int = coords.reshape(-1 , 2 , 2 )
        _lowerCAmelCase : Optional[int] = coords[..., 0] * (new_w / old_w)
        _lowerCAmelCase : Optional[Any] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            # Restore the flat (x1, y1, x2, y2) box layout.
            _lowerCAmelCase : Tuple = coords.reshape(-1 , 4 )
        return coords
    def a ( self , snake_case__=None , snake_case__=None , snake_case__=None , ):
        '''Validate prompt inputs, converting tensors to nested lists of np arrays.'''
        if input_points is not None:
            if hasattr(snake_case__ , 'numpy' ): # Checks for TF or Torch tensor
                _lowerCAmelCase : Dict = input_points.numpy().tolist()
            if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_points[0] , snake_case__ ):
                raise ValueError('Input points must be a list of list of floating points.' )
            _lowerCAmelCase : Optional[int] = [np.array(snake_case__ ) for input_point in input_points]
        else:
            _lowerCAmelCase : Optional[Any] = None
        if input_labels is not None:
            if hasattr(snake_case__ , 'numpy' ):
                _lowerCAmelCase : Optional[Any] = input_labels.numpy().tolist()
            if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_labels[0] , snake_case__ ):
                raise ValueError('Input labels must be a list of list integers.' )
            _lowerCAmelCase : List[Any] = [np.array(snake_case__ ) for label in input_labels]
        else:
            _lowerCAmelCase : Optional[Any] = None
        if input_boxes is not None:
            if hasattr(snake_case__ , 'numpy' ):
                _lowerCAmelCase : Optional[Any] = input_boxes.numpy().tolist()
            if (
                not isinstance(snake_case__ , snake_case__ )
                or not isinstance(input_boxes[0] , snake_case__ )
                or not isinstance(input_boxes[0][0] , snake_case__ )
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.' )
            _lowerCAmelCase : Dict = [np.array(snake_case__ ).astype(np.floataa ) for box in input_boxes]
        else:
            _lowerCAmelCase : Optional[Any] = None
        return input_points, input_labels, input_boxes
    @property
    def a ( self ):
        '''Deduplicated input names exposed by the underlying image processor.'''
        _lowerCAmelCase : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(snake_case__ ) )
    def a ( self , *snake_case__ , **snake_case__ ):
        '''Delegate mask post-processing to the image processor.'''
        return self.image_processor.post_process_masks(*snake_case__ , **snake_case__ )
| 704 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowerCAmelCase : Union[str, Any] = {
"""AI-Sweden/gpt-sw3-126m""": 20_48,
"""AI-Sweden/gpt-sw3-350m""": 20_48,
"""AI-Sweden/gpt-sw3-1.6b""": 20_48,
"""AI-Sweden/gpt-sw3-6.7b""": 20_48,
"""AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece tokenizer for the GPT-SW3 model family.

    Normalizes exotic whitespace and strips non-printing characters before
    encoding, resolves per-checkpoint special-token defaults, and builds
    chat-style prompts for the conversational pipeline.

    NOTE(review): automated renaming damaged this class — ``__init__`` (and
    other methods) declare several parameters all called ``snake_case__``
    (a SyntaxError), every public method is named ``a`` (later definitions
    shadow earlier ones), and many values are bound to throw-away
    ``_lowerCAmelCase`` locals whose original names (``eos_token``,
    ``out_string``, ``save_directory``, ``token_ids`` ...) are then read as
    undefined names. The docstrings below describe the intended behavior;
    the code needs de-mangling before it can run.
    """
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ["input_ids", "attention_mask"]
    def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
        '''Load the SentencePiece model and resolve special-token defaults
        (gpt-sw3-7b reuses unk/eos as pad/bos; other checkpoints use
        ``<pad>``/``<s>``).'''
        _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            _lowerCAmelCase : Any = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token
        _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token
            _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token
        else:
            _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token
            _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
        _lowerCAmelCase : Union[str, Any] = do_lower_case
        _lowerCAmelCase : Optional[int] = remove_space
        _lowerCAmelCase : Any = keep_accents
        _lowerCAmelCase : Optional[int] = vocab_file
        _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(snake_case__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        _lowerCAmelCase : Optional[Any] = re.compile(
            F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
    def __getstate__( self ):
        '''Return picklable state: the SentencePieceProcessor cannot be
        pickled, so it is dropped and re-created in ``__setstate__``.'''
        _lowerCAmelCase : List[str] = self.__dict__.copy()
        _lowerCAmelCase : int = None
        return state
    def __setstate__( self , snake_case__ ):
        '''Restore pickled state and re-load the SentencePiece model from
        ``self.vocab_file``.'''
        _lowerCAmelCase : Any = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _lowerCAmelCase : int = {}
        _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def a ( self ):
        '''Size of the SentencePiece vocabulary.'''
        return len(self.sp_model )
    def a ( self , snake_case__ ):
        '''Preprocess raw text: strip non-printing characters, map unusual
        whitespace variants to plain spaces, then NFC-normalize.'''
        _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ )
        # Normalize whitespaces
        _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ )
        return text
    def a ( self , snake_case__ , **snake_case__ ):
        '''Tokenize input text into SentencePiece string pieces after
        preprocessing (originally ``out_type=str``).'''
        _lowerCAmelCase : str = self.preprocess_text(snake_case__ )
        return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a token (string piece) to its vocabulary id.'''
        return self.sp_model.PieceToId(snake_case__ )
    def a ( self , snake_case__ ):
        '''Convert a vocabulary id back to its token (string piece).'''
        return self.sp_model.IdToPiece(snake_case__ )
    @staticmethod
    def a ( snake_case__ ):
        '''Identity clean-up hook for decoded text.

        NOTE(review): returns the undefined name ``out_string`` instead of
        the parameter.'''
        return out_string
    def a ( self , snake_case__ ):
        '''Join a token sequence back into a string: runs of ordinary pieces
        are decoded with SentencePiece, special tokens are spliced in
        verbatim with a separating space.'''
        _lowerCAmelCase : int = []
        _lowerCAmelCase : Optional[Any] = ''
        _lowerCAmelCase : Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(snake_case__ ) + token
                _lowerCAmelCase : Union[str, Any] = True
                _lowerCAmelCase : List[Any] = []
            else:
                current_sub_tokens.append(snake_case__ )
                _lowerCAmelCase : List[Any] = False
        out_string += self.sp_model.decode(snake_case__ )
        return out_string
    def a ( self ):
        '''Return the full vocabulary mapping (token -> id), including any
        added tokens.'''
        _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Save the SentencePiece model into the given directory, copying the
        original file when available or serializing the in-memory model.'''
        if not os.path.isdir(snake_case__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        _lowerCAmelCase : int = os.path.join(
            snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(snake_case__ , 'wb' ) as fi:
                _lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
                fi.write(snake_case__ )
        return (out_vocab_file,)
    def a ( self , snake_case__ , snake_case__ = False ):
        '''Fast encode: preprocess then encode to ids (single string or list
        of strings); optionally wrap the result in a torch tensor when
        ``return_tensors`` is True or "pt".'''
        if isinstance(snake_case__ , snake_case__ ):
            _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ )
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        else:
            _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text]
            _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
        if return_tensors is True or return_tensors == "pt":
            _lowerCAmelCase : int = torch.tensor(snake_case__ )
        return token_ids
    def a ( self , snake_case__ ):
        '''Fast decode: ids back to text via SentencePiece.'''
        return self.sp_model.decode(snake_case__ )
    def a ( self , snake_case__ ):
        '''Build and encode a gpt-sw3 chat prompt from a Conversation:
        "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:".'''
        _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        _lowerCAmelCase : str = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=snake_case__ )
| 630 | 0 |
from __future__ import annotations
import queue
class UpperCamelCase__ :
    """A binary-tree node: a payload plus left/right child links."""
    def __init__( self , snake_case__ ):
        """Store *snake_case__* as this node's value; children start empty.

        NOTE(review): the original bound the undefined name ``data`` to a
        throw-away local and never set any instance attribute, so every
        traversal below would fail with AttributeError.
        """
        self.data = snake_case__
        self.left = None   # left child node, or None
        self.right = None  # right child node, or None
def lowercase ():
    """Interactively build a binary tree breadth-first from stdin.

    Prompts for the root value, then for each dequeued node asks for its
    left and right children; entering ``n`` stops input and returns the
    root node.

    NOTE(review): automated renaming left many undefined names in this body
    (``TreeNode``, ``tree_node``, ``node_found``, ``check``, ``left_node``,
    ``right_node`` — the class above is ``UpperCamelCase__`` and values are
    bound to throw-away locals), so it raises NameError as written.
    """
    print('\n********Press N to stop entering at any point of time********\n' )
    _lowerCAmelCase : Optional[int] = input('Enter the value of the root node: ' ).strip().lower()
    _lowerCAmelCase : queue.Queue = queue.Queue()
    _lowerCAmelCase : Union[str, Any] = TreeNode(int(_A ) )
    q.put(_A )
    while not q.empty():
        _lowerCAmelCase : Any = q.get()
        _lowerCAmelCase : Tuple = f'Enter the left node of {node_found.data}: '
        _lowerCAmelCase : List[Any] = input(_A ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        _lowerCAmelCase : Tuple = TreeNode(int(_A ) )
        _lowerCAmelCase : Optional[int] = left_node
        q.put(_A )
        _lowerCAmelCase : Tuple = f'Enter the right node of {node_found.data}: '
        _lowerCAmelCase : List[str] = input(_A ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        _lowerCAmelCase : Dict = TreeNode(int(_A ) )
        _lowerCAmelCase : Union[str, Any] = right_node
        q.put(_A )
    raise  # unreachable: the input loop always returns; keeps type checkers happy
def lowercase (_A ):
    """Print a pre-order (root, left, right) traversal of the tree rooted at
    *_A* as comma-terminated values on one line; ``None`` prints nothing.
    """
    # NOTE(review): the original guard `isinstance(_A, _A)` raised TypeError
    # for every node (arg 2 must be a type) and the recursion targeted the
    # undefined name `pre_order` — both automated-rename damage.
    node = _A
    if node is None:
        return
    print(node.data , end=',' )
    lowercase(node.left )
    lowercase(node.right )
def lowercase (_A ):
    """Print an in-order (left, root, right) traversal of the tree rooted at
    *_A* as comma-terminated values on one line; ``None`` prints nothing.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError, and the recursion called the undefined name `in_order`.
    node = _A
    if node is None:
        return
    lowercase(node.left )
    print(node.data , end=',' )
    lowercase(node.right )
def lowercase (_A ):
    """Print a post-order (left, right, root) traversal of the tree rooted
    at *_A* as comma-terminated values; ``None`` prints nothing.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError, and the recursion called the undefined name `post_order`.
    node = _A
    if node is None:
        return
    lowercase(node.left )
    lowercase(node.right )
    print(node.data , end=',' )
def lowercase (_A ):
    """Print a breadth-first (level-order) traversal of the tree rooted at
    *_A* as comma-terminated values on one line.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError; the dequeued node was also bound to a throw-away local.
    if _A is None:
        return
    q: queue.Queue = queue.Queue()
    q.put(_A )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def lowercase (_A ):
    """Print the tree level by level: each tree level's values (comma
    separated) on its own line.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError; the per-level list and dequeued node were bound to
    # throw-away locals while `list_`/`node_dequeued` were read undefined.
    if _A is None:
        return
    q: queue.Queue = queue.Queue()
    q.put(_A )
    while not q.empty():
        next_level = []  # children of the level currently being printed
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                next_level.append(node_dequeued.left )
            if node_dequeued.right:
                next_level.append(node_dequeued.right )
        print()
        for node in next_level:
            q.put(node )
def lowercase (_A ):
    """Iterative pre-order traversal (root, left, right) using an explicit
    stack; values are printed comma-terminated.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError; the cursor was bound to throw-away locals while `n` was
    # read undefined.
    if _A is None:
        return
    stack: list = []
    n = _A
    while n or stack:
        while n:  # walk down the left spine, printing on the way down
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # current node has no left child: backtrack and take the right branch
        n = stack.pop()
        n = n.right
def lowercase (_A ):
    """Iterative in-order traversal (left, root, right) using an explicit
    stack; values are printed comma-terminated.
    """
    # NOTE(review): original guard `isinstance(_A, _A)` always raised
    # TypeError; cursor assignments were bound to throw-away locals.
    if _A is None:
        return
    stack: list = []
    n = _A
    while n or stack:
        while n:  # push the whole left spine
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right
def lowercase (_A ):
    """Iterative post-order traversal (left, right, root) using two stacks;
    values are printed comma-terminated.
    """
    # NOTE(review): the original collapsed both stacks into one name bound
    # to a tuple (`= [], []`), so the first `.append` raised AttributeError,
    # and its `isinstance(_A, _A)` guard always raised TypeError.
    if _A is None:
        return
    stack_in: list = [_A]
    stack_out: list = []
    while stack_in:  # build the reverse of the post order on stack_out
        n = stack_in.pop()
        if n.left:
            stack_in.append(n.left )
        if n.right:
            stack_in.append(n.right )
        stack_out.append(n )
    while stack_out:  # popping yields the post order
        print(stack_out.pop().data , end=',' )
def lowercase (s = "" , width=5_0 , char="*" ):
    """Return *s* centered inside a horizontal rule of *char* characters of
    total length *width*; with an empty *s*, return a newline followed by a
    full-width rule.
    """
    # NOTE(review): the original declared all three parameters as `_A`
    # (a SyntaxError) and bound divmod's pair to one throw-away name while
    # reading the undefined `left`/`extra`.
    if not s:
        return "\n" + width * char
    # Split the remaining width around the padded text; any odd leftover
    # character goes on the right-hand side.
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): every traversal above was renamed to `lowercase` (each
    # definition shadowing the previous) and the node class to
    # `UpperCamelCase__`, so the names called below (prompt, build_tree,
    # pre_order, in_order, ...) no longer exist — this demo raises
    # NameError as written.
    print(prompt("""Binary Tree Traversals"""))
    lowerCAmelCase : TreeNode = build_tree()
    print(prompt("""Pre Order Traversal"""))
    pre_order(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal"""))
    in_order(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal"""))
    post_order(node)
    print(prompt() + """\n""")
    print(prompt("""Level Order Traversal"""))
    level_order(node)
    print(prompt() + """\n""")
    print(prompt("""Actual Level Order Traversal"""))
    level_order_actual(node)
    print("""*""" * 50 + """\n""")
    print(prompt("""Pre Order Traversal - Iteration Version"""))
    pre_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal - Iteration Version"""))
    in_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal - Iteration Version"""))
    post_order_iter(node)
    print(prompt())
| 705 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Test suite for ``DDPMScheduler``: config permutations, variance
    spot-checks, full denoising loops, and custom-timestep validation.

    NOTE(review): automated renaming bound most intermediates to throw-away
    ``_lowerCAmelCase`` locals and then read the original names (``config``,
    ``scheduler``, ``model``, ``sample``, ``timesteps`` ...) as undefined —
    these tests raise NameError as written. Every method is also named
    ``a``, so only the last definition survives on the class.
    """
    __magic_name__ = (DDPMScheduler,)
    def a ( self , **snake_case__ ):
        '''Build the default scheduler config dict, updated with overrides.'''
        _lowerCAmelCase : Union[str, Any] = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**snake_case__ )
        return config
    def a ( self ):
        '''Scheduler accepts a range of num_train_timesteps values.'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case__ )
    def a ( self ):
        '''Scheduler accepts paired beta_start/beta_end values.'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
    def a ( self ):
        '''Scheduler accepts the supported beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=snake_case__ )
    def a ( self ):
        '''Scheduler accepts the supported variance types.'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=snake_case__ )
    def a ( self ):
        '''Scheduler accepts both clip_sample settings.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case__ )
    def a ( self ):
        '''Thresholding works across thresholds and prediction types.'''
        self.check_over_configs(thresholding=snake_case__ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
    def a ( self ):
        '''Scheduler accepts each prediction type.'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case__ )
    def a ( self ):
        '''Forward pass is consistent at the first, a middle, and the last step.'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=snake_case__ )
    def a ( self ):
        '''Spot-check _get_variance at t=0, t=487 and t=999.'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    def a ( self ):
        '''Run the full reverse-diffusion loop (epsilon prediction) and check
        the summed/mean magnitudes of the final sample.'''
        _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : str = self.dummy_model()
        _lowerCAmelCase : str = self.dummy_sample_deter
        _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Dict = pred_prev_sample
        _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3
    def a ( self ):
        '''Same full loop as above but with v_prediction; different targets.'''
        _lowerCAmelCase : int = self.scheduler_classes[0]
        _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = len(snake_case__ )
        _lowerCAmelCase : Any = self.dummy_model()
        _lowerCAmelCase : Tuple = self.dummy_sample_deter
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        for t in reversed(range(snake_case__ ) ):
            # 1. predict noise residual
            _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
            # 2. predict previous mean of sample x_t-1
            _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _lowerCAmelCase : Tuple = pred_prev_sample
        _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
        _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def a ( self ):
        '''Custom descending timesteps round-trip through previous_timestep.'''
        _lowerCAmelCase : List[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : Optional[int] = self.get_scheduler_config()
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=snake_case__ )
        _lowerCAmelCase : Union[str, Any] = scheduler.timesteps
        for i, timestep in enumerate(snake_case__ ):
            if i == len(snake_case__ ) - 1:
                _lowerCAmelCase : str = -1
            else:
                _lowerCAmelCase : Optional[Any] = timesteps[i + 1]
            _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ )
            _lowerCAmelCase : int = prev_t.item()
            self.assertEqual(snake_case__ , snake_case__ )
    def a ( self ):
        '''Non-descending custom timesteps must be rejected.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : Tuple = self.get_scheduler_config()
        _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
        with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=snake_case__ )
    def a ( self ):
        '''Passing both num_inference_steps and custom timesteps must fail.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : List[str] = self.get_scheduler_config()
        _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0]
        _lowerCAmelCase : int = len(snake_case__ )
        with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
    def a ( self ):
        '''Timesteps at/after num_train_timesteps must be rejected.

        NOTE(review): the msg string below lacks an f-prefix and has a
        doubled closing brace — verify against upstream.'''
        _lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCAmelCase : int = self.get_scheduler_config()
        _lowerCAmelCase : Any = scheduler_class(**snake_case__ )
        _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=snake_case__ )
| 630 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
    """Runs accelerate's bundled external-deps metric script on CPU (via
    debug_launcher), one GPU, and multiple GPUs (via torchrun).

    NOTE(review): the first method binds the script path and imported
    module to throw-away locals, while later methods read
    ``self.test_metrics`` / ``self.test_file_path`` which are never
    assigned; every method is also named ``a``, so only the last
    definition survives on the class.
    """
    def a ( self ):
        '''Locate accelerate's external_deps/test_metrics.py and import it.'''
        _lowerCAmelCase : Optional[int] = inspect.getfile(accelerate.test_utils )
        _lowerCAmelCase : Optional[Any] = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        _lowerCAmelCase : Union[str, Any] = test_metrics
    @require_cpu
    def a ( self ):
        '''Run the metrics script under debug_launcher with one process.'''
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def a ( self ):
        '''Run the metrics script under debug_launcher (default processes).'''
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def a ( self ):
        '''Run the metrics script directly on a single GPU.'''
        self.test_metrics.main()
    @require_multi_gpu
    def a ( self ):
        '''Run the metrics script under torchrun across all visible GPUs.'''
        print(F'Found {torch.cuda.device_count()} devices.' )
        _lowerCAmelCase : str = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(snake_case__ , env=os.environ.copy() )
| 706 |
'''simple docstring'''
import socket
def lowercase ():
    """Connect to a file server on this host (port 12312) and save the
    received payload to ``Received_file``.

    Sends a greeting, then streams the response to disk in 1 KiB chunks
    until the server closes the connection.
    """
    host = socket.gethostname()
    port = 1_2_3_1_2
    # Context managers guarantee the socket and output file are closed even
    # if recv()/write() raises — the original leaked the socket on error and
    # wrote the undefined name `_A` instead of the received chunk.
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as sock:
        sock.connect((host, port) )
        sock.send(B'Hello server!' )
        with open('Received_file' , 'wb' ) as out_file:
            print('File opened' )
            print('Receiving data...' )
            while True:
                data = sock.recv(1_0_2_4 )
                if not data:
                    break
                out_file.write(data )
    print('Successfully received the file' )
    print('Connection closed' )
if __name__ == "__main__":
    # The receiving routine above is (mis)named `lowercase`; `main` does not
    # exist in this module, so the original entry point raised NameError.
    lowercase()
| 630 | 0 |
'''simple docstring'''
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase : Union[str, Any] = """Create a default config file for Accelerate with only a few flags set."""
def lowercase (_A="no" , _A = default_json_config_file , _A = False ):
    """Write a minimal Accelerate cluster config (compute environment, mixed
    precision, and auto-detected GPU/XPU/NPU topology) to the save location,
    returning the written path, or ``False`` when a config already exists.

    NOTE(review): renaming damage — all three parameters share the name
    ``_A`` (a SyntaxError), and the hardware-detection branches bind the
    per-device fields (presumably ``num_processes``, ``distributed_type``,
    ``use_cpu``) to throw-away locals instead of entries of ``config``;
    several original names (``path``, ``save_location``,
    ``mixed_precision``, ``num_gpus`` ...) are read undefined.
    """
    _lowerCAmelCase : List[Any] = Path(_A )
    path.parent.mkdir(parents=_A , exist_ok=_A )
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    _lowerCAmelCase : Any = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    _lowerCAmelCase : Any = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Device-topology detection: CUDA first, then XPU (if requested), then
    # NPU, finally a single-process CPU fallback.
    if torch.cuda.is_available():
        _lowerCAmelCase : Dict = torch.cuda.device_count()
        _lowerCAmelCase : Optional[int] = num_gpus
        _lowerCAmelCase : str = False
        if num_gpus > 1:
            _lowerCAmelCase : Optional[int] = 'MULTI_GPU'
        else:
            _lowerCAmelCase : int = 'NO'
    elif is_xpu_available() and use_xpu:
        _lowerCAmelCase : Dict = torch.xpu.device_count()
        _lowerCAmelCase : Union[str, Any] = num_xpus
        _lowerCAmelCase : Union[str, Any] = False
        if num_xpus > 1:
            _lowerCAmelCase : Union[str, Any] = 'MULTI_XPU'
        else:
            _lowerCAmelCase : Optional[Any] = 'NO'
    elif is_npu_available():
        _lowerCAmelCase : str = torch.npu.device_count()
        _lowerCAmelCase : int = num_npus
        _lowerCAmelCase : Optional[Any] = False
        if num_npus > 1:
            _lowerCAmelCase : Any = 'MULTI_NPU'
        else:
            _lowerCAmelCase : Dict = 'NO'
    else:
        _lowerCAmelCase : Optional[Any] = 0
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : Tuple = 1
        _lowerCAmelCase : Union[str, Any] = 'NO'
    _lowerCAmelCase : Dict = ClusterConfig(**_A )
    config.to_json_file(_A )
    return path
def lowercase (_A , _A ):
    """Register the `default` sub-command (--config_file, --mixed_precision)
    and return the parser.

    NOTE(review): both parameters are named ``_A`` (a SyntaxError); the
    freshly created sub-parser is bound to a throw-away local, so the
    ``add_argument`` calls attach to the outer parser, and ``set_defaults``
    receives ``func=_A`` rather than the command callable — verify against
    upstream accelerate before relying on this.
    """
    _lowerCAmelCase : List[str] = parser.add_parser('default' , parents=_A , help=_A , formatter_class=_A )
    parser.add_argument(
        '--config_file' , default=_A , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_A , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=_A )
    return parser
def lowercase (_A ):
    """Handler for `accelerate config default`: write the basic config file
    and report where it was saved.

    _A: parsed argparse namespace providing ``mixed_precision`` and
    ``save_location``.
    """
    # The original read the undefined global `args` and the undefined local
    # `config_file`; use the parameter and a real binding instead.
    config_file = write_basic_config(_A.mixed_precision , _A.save_location )
    if config_file:
        print(f'accelerate configuration saved at {config_file}' )
| 707 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Conversion-mode flags. NOTE(review): renaming bound all three to the same
# throw-away name; the script below reads `do_only_config`,
# `do_only_weights` and `do_only_renaming`, which are therefore undefined —
# as are `parser`, `args`, `text`, `config`, `class_name`, `model`,
# `state_dict`, `new_state_dict` and `has_changed` further down. The
# comments below describe the intended flow.
lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False
if __name__ == "__main__":
    lowerCAmelCase : Any = argparse.ArgumentParser()
    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    lowerCAmelCase : Optional[int] = parser.parse_args()
    # Mapping of legacy config keys -> current UNet config keys.
    lowerCAmelCase : int = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }
    # Mapping of legacy state-dict key prefixes -> current module names.
    lowerCAmelCase : int = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }
    # Configs may live at the repo root or inside a `unet` subfolder.
    lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        lowerCAmelCase : int = reader.read()
    lowerCAmelCase : List[str] = json.loads(text)
    if do_only_config:
        # Drop every legacy key so the saved config contains only new names.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, """config.json"""):
        lowerCAmelCase : str = UNetaDModel(**config)
    else:
        lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        lowerCAmelCase : Dict = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    lowerCAmelCase : Union[str, Any] = dict(model.config)
    if do_only_renaming:
        # Rewrite legacy config keys in place and strip the UNetRes prefix
        # from block-type names.
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowerCAmelCase : str = config[key]
                del config[key]
        lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
    if do_only_weights:
        lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        lowerCAmelCase : str = {}
        for param_key, param_value in state_dict.items():
            # Conv `op` weights were folded elsewhere; skip them entirely.
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            lowerCAmelCase : str = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    lowerCAmelCase : Dict = param_value
                    lowerCAmelCase : Tuple = True
            if not has_changed:
                lowerCAmelCase : Tuple = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ) -> datetime:
    """Calculate the Gregorian Easter date for the year *_A* using Gauss's
    Easter algorithm, returned as a ``datetime`` at midnight on Easter
    Sunday.
    """
    year = _A  # the original body read the undefined name `year`
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    # Gauss's algorithm uses integer division here; the original's true
    # division produced fractional M/N and wrong dates for centuries not
    # divisible by 4 (e.g. the 1900s).
    leap_day_reinstall_number = leap_day_inhibits // 4
    # M: century-dependent shift of the ecclesiastical full moon
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    # N: century-dependent weekday correction
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two classical exceptions cap Easter at April 19 / April 18.
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # NOTE(review): the function above is (mis)named `lowercase`, and the
    # original bound the tense string to a throw-away name while printing
    # the undefined `tense` and calling the undefined `gauss_easter`.
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {lowercase(year)}")
| 708 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 byte-pair tokenizer layer built on keras-nlp's
    BytePairTokenizer, with optional padding to a fixed length.

    NOTE(review): renaming damage — ``__init__`` declares two parameters
    both named ``snake_case__`` (a SyntaxError) and binds vocab/merges/
    max_length/pad_token_id to throw-away locals instead of ``self.*``, so
    ``get_config`` and the call method read attributes that are never set;
    the classmethod constructors likewise read undefined names
    (``tokenizer``, ``merges``, ``vocab``).
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ):
        '''Build the BPE tokenizer from a vocab dict and merge list, with an
        optional fixed sequence length and pad token id.'''
        super().__init__()
        _lowerCAmelCase : Union[str, Any] = pad_token_id
        _lowerCAmelCase : List[Any] = max_length
        _lowerCAmelCase : Tuple = vocab
        _lowerCAmelCase : str = merges
        _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Construct from an existing GPT2Tokenizer (reusing its vocab and
        merge ranks).'''
        _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()]
        _lowerCAmelCase : Any = tokenizer.get_vocab()
        return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''Construct from a pretrained checkpoint name or path.'''
        _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ )
        return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ ):
        '''Construct from a Keras-style config dict (inverse of get_config).'''
        return cls(**snake_case__ )
    def a ( self ):
        '''Return the Keras serialization config for this layer.'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def a ( self , snake_case__ , snake_case__ = None ):
        '''Tokenize a batch of strings; when a pad token id is configured,
        pad/truncate to max_length and return an attention mask as well.'''
        _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ )
        _lowerCAmelCase : str = tf.ones_like(snake_case__ )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length
            if max_length is not None:
                _lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs(
                    snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 0 |
'''simple docstring'''
from collections import deque
class UpperCamelCase__ :
    """A process entry for the multi-level feedback-queue scheduler: static
    arrival/burst parameters plus the bookkeeping fields the scheduler
    updates while it runs.
    """
    def __init__( self , process_name , arrival_time , burst_time ):
        """Create a process; bookkeeping fields start in their arrival state.

        NOTE(review): the original declared three parameters all named
        ``snake_case__`` (a SyntaxError) and bound them to throw-away
        locals instead of instance attributes.
        """
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue CPU scheduler.

    The first ``number_of_queues - 1`` levels run round-robin with the given
    time slices; whatever remains is finished first-come-first-served.

    Fixed: ``__init__`` and ``round_robin`` declared duplicate parameter
    names (a SyntaxError); every method had been renamed to ``a`` so later
    definitions shadowed earlier ones and the internal ``self.<method>``
    calls failed; instance attributes were assigned to throwaway locals.
    Names are restored to match the call sites in the ``__main__`` section.
    """

    def __init__(self, number_of_queues, time_slices, queue, current_time):
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        """Return the process names in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        """Return each process's accumulated waiting time."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        """Return each process's turnaround time (arrival -> completion)."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        """Return each process's completion (stop) time."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        """Return the remaining burst time of every process in ``queue``."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        """Charge ``process`` for the time it sat in the ready queue since its
        last stop, and return its new total waiting time."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        """Run every remaining process to completion in FCFS order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            # NOTE(review): ``+=`` (rather than ``=``) is kept from the original;
            # it only matters for non-zero arrival times — confirm intent.
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        """Run one round-robin cycle with quantum ``time_slice``; unfinished
        processes re-enter the queue.  Returns (finished, ready_queue)."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        """Drive all levels: round-robin for the first ``number_of_queues - 1``
        levels, then FCFS on the rest.  Returns the finish queue."""
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # Example workload: four processes, all arriving at t=0.
    # Fixed: the four distinct names P1..P4 had been collapsed into a single
    # undefined name, and the scheduler classes were referenced by names that
    # no longer existed.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # One round-robin time slice is required per non-final queue level.
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # Fresh processes for the demo run (the scheduler mutates them in place).
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
| 709 |
"""Lazy-import ``__init__`` for the M-CTC-T model.

Fixed: the import-structure dict was bound to a throwaway name so the
``_import_structure`` reference at the bottom was a NameError; the
torch-only symbol list was never inserted into the structure; and the
``_LazyModule`` instance was discarded instead of being installed as the
module via ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> list of public symbols exported lazily.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling code is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for the Stable Diffusion XL img2img pipeline with tiny,
    fully seeded components.

    Fixed: the base-class placeholders were undefined (restored to the two
    tester mixins imported above); every class attribute had been collapsed
    to ``__magic_name__`` and every method to ``a`` (so only the last of
    each survived); ``get_dummy_inputs`` declared duplicate parameter names
    (a SyntaxError).
    """

    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny deterministic pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            # NOTE(review): boolean literal lost in the rename — True matches
            # the SDXL reference tests; confirm.
            use_linear_projection=True,
            addition_embed_type='text_time',
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule='scaled_linear',
            timestep_spacing='leading',
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # NOTE(review): the flag value was lost in the rename; True inferred.
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=True)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on ``device``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        """End-to-end tiny run on CPU against a pinned output slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_attention_slicing_forward_pass(self):
        # Loosened tolerance versus the mixin default.
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)

    def test_save_load_optional_components(self):
        # Not applicable to this pipeline.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        """Plain negative prompts and precomputed embeds must agree."""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1E-4
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow GPU integration test using a released Stable Diffusion checkpoint.

    Fixed: ``get_inputs`` declared duplicate parameter names (a SyntaxError)
    and every method had been renamed to ``a``, so ``tearDown`` was never
    invoked by unittest and the test method shadowed the helpers.
    """

    def tearDown(self):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.floataa, seed=0):
        """Deterministic pipeline inputs with fixed numpy-seeded latents.

        NOTE(review): ``torch.floataa`` follows this file's transliterated
        naming (digits -> 'a'); the real attribute is ``torch.float32``.
        """
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    # NOTE(review): the original test-method name was lost in the rename;
    # restored best-effort from the checkpoint it exercises.
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
        assert np.abs(image_slice - expected_slice).max() < 7E-3
| 710 |
'''simple docstring'''
lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = False
def lowercase (_A ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCAmelCase : Any = chain(next_number(_A ) )
_lowerCAmelCase : List[str] = number_chain
while number < 1_0_0_0_0_0_0_0:
_lowerCAmelCase : Tuple = number_chain
number *= 1_0
return number_chain
def lowercase (_A = 1_0_0_0_0_0_0_0 ):
"""simple docstring"""
for i in range(1 , _A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 630 | 0 |
"""Lazy-import ``__init__`` for the LUKE model.

Fixed: the import-structure dict was bound to a throwaway name so the
``_import_structure`` reference at the bottom was a NameError; the
torch-only symbol list was never inserted into the structure; and the
``_LazyModule`` instance was discarded instead of being installed as the
module via ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> list of public symbols exported lazily.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

# The modeling code is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config checks specific to MobileViTV2.

    Fixed: the base-class placeholder was undefined (restored to
    ``ConfigTester``, imported above); the created config was bound to a
    throwaway local while the assertion read an undefined name; the class
    name is restored to match the ``MobileViTVaConfigTester(...)`` call site
    in the model-test ``setUp``.
    """

    def create_and_test_config_common_properties(self):
        # MobileViTV2Config must expose its width multiplier.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs and inputs for the common model tests.

    Fixed: all ``__init__`` parameters shared one duplicate name (a
    SyntaxError), every method was named ``a`` (shadowing each other), and
    attributes were assigned to throwaway locals.  Parameter/attribute names
    are restored from the defaults and the preserved ``self.*`` reads; the
    class name matches the ``MobileViTVaModelTester(self)`` call site.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Hidden size scales with the width multiplier, rounded to /8.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        # get_config reads the *_prob names below.
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Tiny MobileViTV2 config for fast tests."""
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model output shape check."""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head logits shape check."""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Segmentation head logits shape check, with and without labels."""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for MobileViTV2.

    Fixed: the base-class placeholders were undefined (restored to the two
    mixins imported above); all class attributes were collapsed to
    ``__magic_name__`` and all methods to ``a`` (so unittest discovered
    nothing); the nested ``check_hidden_states_output`` declared duplicate
    parameter names (a SyntaxError).
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        """forward() must take pixel_values as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Return the COCO fixture image used by the integration tests.

    Fixed: the function had been renamed to ``lowercase`` while its three
    call sites in the integration tests still call ``prepare_img``.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests against released MobileViTV2 checkpoints.

    Fixed: every method had been renamed to ``a`` and locals collapsed, so
    reads of ``image_processor``/``model``/``outputs`` were NameErrors.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the ImageNet-1k checkpoint (None without vision)."""
        return (
            MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        # resized to an explicit target size
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # default: native logits resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 630 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject ):
    """Placeholder object raising a helpful error when the backends
    transformers/torch/note_seq are not installed.

    Fixed: the metaclass placeholder was undefined (restored to
    ``DummyObject``, imported above); ``__init__``'s star parameters shared
    one duplicate name (a SyntaxError); the two classmethods were both named
    ``a`` — restored to the standard dummy-object factory names.
    """

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 712 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration test for the Flax XLM-RoBERTa base checkpoint.

    Fixed: locals had been collapsed to one reused name, so reads of
    ``tokenizer``/``input_ids``/``output`` and the expected values were
    NameErrors; the test method had been renamed to ``a``.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 630 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
# Make torch / cuDNN fully deterministic so the image-comparison tests below
# are reproducible across runs.
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionLDMaDPipeline`` built from tiny components."""

    # NOTE(review): these four attributes were all mangled to one shadowing
    # ``__magic_name__``; names restored per the diffusers PipelineTesterMixin
    # convention (pipeline_class / params / batch_params / image_params) — confirm.
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components( self ):  # fix: all five methods were named ``a``, shadowing each other while bodies called the real names
        """Build a dict of tiny randomly-initialised pipeline components."""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        # NOTE(review): clip_sample / set_alpha_to_one were mangled to an
        # undefined name; False matches the reference diffusers test — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):  # fix: parameters were both named ``snake_case__`` (SyntaxError)
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_ddim( self ):
        """RGB and depth outputs on CPU must match the reference slices."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth  # fix: the tuple was assigned to one name, leaving ``rgb``/``depth`` unbound
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
        expected_slice_depth = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2

    def test_stable_diffusion_prompt_embeds( self ):
        """A prompt and its pre-computed embeddings must produce identical outputs."""
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]
        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
        text_inputs = text_inputs['input_ids'].to(torch_device )
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs )[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        # fix: the mangled code compared each slice against *itself* (always 0)
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1E-4

    def test_stable_diffusion_negative_prompt( self ):
        """Negative prompts with a PNDM scheduler must match the reference slices."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        # fix: the PNDM scheduler was built but never installed into components
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = 'french fries'
        output = ldmad_pipe(**inputs , negative_prompt=negative_prompt )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
        expected_slice_depth = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """GPU integration tests against the pretrained ``Intel/ldm3d`` checkpoint."""

    def tearDown( self ):  # fix: was ``a`` — unittest only calls the hook when it is named tearDown
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """Deterministic pipeline kwargs with pre-drawn latents.

        fix: the parameters were all named ``snake_case__`` (SyntaxError) and
        the dtype default referenced nonexistent ``torch.floataa``.
        """
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldmad_stable_diffusion( self ):
        """RGB/depth slices from Intel/ldm3d must match reference values."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): the depth slice is taken from ``rgb`` in the original —
        # the 9 expected values below only line up with rgb[0, -3:, -1].
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
        expected_slice_depth = np.array(
            [0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """Nightly GPU tests: full 50-step runs against Intel/ldm3d and Intel/ldm3d-4c."""

    def tearDown( self ):  # fix: was ``a`` — unittest only calls the hook when it is named tearDown
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """Deterministic pipeline kwargs with pre-drawn latents (50 inference steps).

        fix: parameters were all named ``snake_case__`` (SyntaxError) and the
        dtype default referenced nonexistent ``torch.floataa``.
        """
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldmad_stable_diffusion( self ):
        """Mean/std of rgb and depth outputs must match the ldm3d reference stats."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth  # fix: tuple was assigned to a single throwaway name
        expected_rgb_mean = 0.49_5586
        expected_rgb_std = 0.3379_5515
        expected_depth_mean = 112.4_8518
        expected_depth_std = 98.48_9746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3

    def test_ldmad_stable_diffusion_4c( self ):
        """Mean/std and shapes must match the ldm3d-4c reference stats."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.419_4127
        expected_rgb_std = 0.3537_5586
        expected_depth_mean = 0.563_8502
        expected_depth_std = 0.3468_6103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 713 |
'''simple docstring'''
def lowercase (_A ):
    """Return a sorted copy of *_A* using pancake sort.

    Repeatedly flips the largest remaining element to the front and then to
    its final position. The input list itself is never mutated — each pass
    rebinds a fresh list.

    fix: ``cur``, ``mi`` and ``arr`` were referenced but their assignments had
    been mangled to a throwaway name, so every call raised NameError.
    """
    arr = _A
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum value in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Flip it to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Flip the whole unsorted prefix, sending the max to position cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    # fix: the sort function above is named ``lowercase``; ``pancake_sort``
    # does not exist in this module.
    print(lowercase(unsorted))
| 630 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and shape-checks every task head.

    fix: renamed from the mangled ``UpperCamelCase__`` because the test class
    below instantiates it as ``MPNetModelTester(self)``; all ``self.x = x``
    assignments and the method names called by the test class are restored.
    """

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """Store the tiny-model hyperparameters used by every check below."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config( self ):
        # NOTE(review): name reconstructed (nothing in this chunk calls it) — confirm.
        return MPNetConfig.from_pretrained('microsoft/mpnet-base' )

    def prepare_config_and_inputs( self ):
        """Random ids/masks/labels plus a tiny config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        """Tiny MPNetConfig mirroring the stored hyperparameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def create_and_check_mpnet_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Base model must produce (batch, seq, hidden) states and a pooled output."""
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_mpnet_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """QA head must produce start/end logits of shape (batch, seq)."""
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_mpnet_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Sequence-classification head must produce (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_mpnet_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Multiple-choice head must produce (batch, num_choices) logits."""
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_mpnet_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Token-classification head must produce (batch, seq, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Config plus the kwargs dict expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test-suite wiring for the tiny MPNet models.

    fix: the base classes were the undefined ``SCREAMING_SNAKE_CASE_``; they
    are restored to the mixins imported at the top of this file, and the
    methods are renamed from the shadowing ``a`` to ``setUp``/``test_*`` so
    unittest actually runs them.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names reconstructed from the upstream MPNet test —
    # both were mangled to one shadowing attribute; confirm.
    test_pruning = False
    test_resize_embeddings = True

    def setUp( self ):
        """Create the shared model and config testers."""
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_mpnet_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration check against the pretrained ``microsoft/mpnet-base`` weights."""

    @slow
    def test_inference_no_head( self ):  # fix: was ``a`` — never discovered by unittest
        """Forward fixed ids and compare shape and a 3x3 slice of the output."""
        model = MPNetModel.from_pretrained('microsoft/mpnet-base' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# fix: both bindings shared one mangled name, so the archive-map dict
# immediately clobbered the logger.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL.
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase__ ( PretrainedConfig ):  # fix: base was the undefined SCREAMING_SNAKE_CASE_; PretrainedConfig is imported above
    """Configuration for GPT-J models (defaults match EleutherAI/gpt-j-6B)."""

    # PretrainedConfig reads these two class attributes by name; they had been
    # mangled to one shadowing ``__magic_name__``.
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    # fix: every parameter was named ``snake_case__`` (duplicate-argument
    # SyntaxError); names restored from the attributes the body sets.
    def __init__( self , vocab_size=5_0400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , tie_word_embeddings=False , **kwargs , ):
        """Store every hyperparameter and forward the token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class UpperCamelCase__ ( OnnxConfigWithPast ):  # fix: base was the undefined SCREAMING_SNAKE_CASE_; OnnxConfigWithPast is imported above
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        """Wrap *config* for ONNX export; default pad_token_id to 0 when unset.

        fix: parameters were all named ``snake_case__`` (SyntaxError) and the
        pad_token_id assignment was lost to a throwaway name.
        """
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ):
        """Dynamic-axes mapping for the exported model's inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers( self ):
        return self._config.n_layer

    @property
    def num_attention_heads( self ):
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """Build ordered dummy inputs, adding zeroed past_key_values when exporting with past."""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset( self ):
        return 13
| 630 | 0 |
'''simple docstring'''
def lowercase (_A ):
    """Print the length (in vertices) of the longest path in a DAG.

    *_A* is an adjacency list: dict mapping vertex index -> list of successor
    indices, with vertices numbered 0..len(_A)-1. Uses Kahn's topological
    order while relaxing ``long_dist`` (longest path ending at each vertex).

    fix: ``indegree``/``queue``/``long_dist``/``vertex`` were referenced but
    their assignments had been mangled away, and the body read a global
    ``graph`` instead of its own parameter.
    """
    indegree = [0] * len(_A )
    queue = []
    long_dist = [1] * len(_A )
    for values in _A.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(_A ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in _A[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
lowerCAmelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# fix: the function defined above is named ``lowercase``; ``longest_distance``
# (and the global ``graph``) do not exist in this module.
lowercase(lowerCAmelCase)
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Any = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 0 |
'''simple docstring'''
from math import ceil
def lowercase (device_map , num_blocks ):
    """Validate a model-parallel *device_map* against *num_blocks* attention blocks.

    Raises ValueError when the map assigns a block twice, omits a block, or
    lists a block index the model does not have; returns None when valid.

    fix: both parameters were named ``_A`` (duplicate-argument SyntaxError)
    and the intermediate lists were never actually bound.
    """
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks ) )
def lowercase (n_layers , devices ):
    """Split *n_layers* layer indices into contiguous chunks, one per device.

    Returns {device: [layer indices]} with ceil(n_layers / len(devices))
    layers per device (the last device may get fewer).

    fix: both parameters were named ``_A`` (duplicate-argument SyntaxError).
    NOTE(review): this shadows the ``lowercase`` defined just above — the two
    were distinct functions before identifier mangling.
    """
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ):
    """Return the Gregorian Easter date for year *_A* via Gauss's method.

    fix: every intermediate (``metonic_cycle``, ``leap_day_inhibits``, ...)
    was referenced but its assignment had been mangled to a throwaway name,
    so the function raised NameError for any input.
    """
    year = _A
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        # fix: ``tense`` is read by the f-string below but was bound to a
        # throwaway name; and the Easter function above is named ``lowercase``.
        tense = """will be""" if year > datetime.now().year else """was"""
        print(F'''Easter in {year} {tense} {lowercase(year)}''')
| 630 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    """Holds the default LevitImageProcessor settings used by the processing tests.

    fix: renamed from the mangled ``UpperCamelCase__`` because the test class
    below instantiates it as ``LevitImageProcessingTester(self)``; the
    constructor parameters (all ``snake_case__`` — duplicate-argument
    SyntaxError) and the ``self.x = x`` assignments are restored.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """Store the image-processor settings (mutable list defaults are read-only here)."""
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):  # fix: was ``a``; the test class calls prepare_image_processor_dict()
        """Return the kwargs dict used to construct a LevitImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Image-processing tests for LeViT (resize / center-crop / normalize).

    NOTE(review): this block appears machine-mangled and is not runnable as
    written — ``snake_case__``, ``LevitImageProcessingTester`` and
    ``SCREAMING_SNAKE_CASE_`` are never defined in this file, every method
    below shares the name ``a`` (so all but the last definition are
    shadowed), and the setUp-style method binds the tester to a throwaway
    local instead of ``self.image_processor_tester``. The docstrings below
    describe the evident intent; confirm against the upstream transformers
    LeViT test module before relying on them.
    """
    # Processor class under test; None when the vision extras are missing.
    __magic_name__ = LevitImageProcessor if is_vision_available() else None
    def a ( self ):
        """setUp-style hook: build the shared tester fixture.

        NOTE(review): assigned to a local, so ``self.image_processor_tester``
        (read by every other method) is never actually set.
        """
        _lowerCAmelCase : Optional[Any] = LevitImageProcessingTester(self )
    @property
    def a ( self ):
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def a ( self ):
        """Processor exposes the expected configuration attributes."""
        _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
        # NOTE(review): `snake_case__` is undefined; the intent is to probe
        # the processor instance created on the line above.
        self.assertTrue(hasattr(snake_case__ , 'image_mean' ) )
        self.assertTrue(hasattr(snake_case__ , 'image_std' ) )
        self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) )
        self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
        self.assertTrue(hasattr(snake_case__ , 'do_center_crop' ) )
        self.assertTrue(hasattr(snake_case__ , 'size' ) )
    def a ( self ):
        """from_dict honors defaults and explicit size/crop_size overrides."""
        _lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        _lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def a ( self ):
        """Intentionally empty placeholder (upstream: test_batch_feature)."""
        pass
    def a ( self ):
        """PIL inputs: single image and batch produce (N, C, crop_h, crop_w)."""
        _lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
        for image in image_inputs:
            self.assertIsInstance(snake_case__ , Image.Image )
        # Test not batched input
        _lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def a ( self ):
        """NumPy inputs: same shape expectations as the PIL variant."""
        _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
        for image in image_inputs:
            self.assertIsInstance(snake_case__ , np.ndarray )
        # Test not batched input
        _lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def a ( self ):
        """Torch-tensor inputs: same shape expectations as the PIL variant."""
        _lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
        for image in image_inputs:
            self.assertIsInstance(snake_case__ , torch.Tensor )
        # Test not batched input
        _lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase : Any = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 717 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : Dict = 100
self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(
snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 630 | 0 |
"""Lazy import structure for the OPT model (transformers ``__init__`` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# BUG FIX: the structure dict and the per-backend symbol lists were bound to
# assorted throwaway names, the lists were never inserted into the structure,
# and the _LazyModule call at the bottom read the undefined name
# `_import_structure`.
_import_structure = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_opt"""] = [
        """OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OPTForCausalLM""",
        """OPTModel""",
        """OPTPreTrainedModel""",
        """OPTForSequenceClassification""",
        """OPTForQuestionAnswering""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_opt"""] = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_opt"""] = [
        """FlaxOPTForCausalLM""",
        """FlaxOPTModel""",
        """FlaxOPTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Standard transformers pattern: replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
"""Numerical integration of f(x) = x**2 with the composite trapezoidal rule.

BUG FIX: every function was defined under the single name ``lowercase`` with
duplicated parameter names (a SyntaxError), while the call sites referenced
``method_a``, ``make_points`` and ``f``; names restored from the call sites.
"""


def method_a(boundary, steps):
    """Approximate the integral of ``f`` over ``[boundary[0], boundary[1]]``.

    :param boundary: two-element sequence [a, b] giving the interval.
    :param steps: number of trapezoids (step width h = (b - a) / steps).
    :return: the trapezoidal-rule estimate of the integral.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # interior sample points carry full weight h
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b - h.

    Note: the bound is compared with accumulated floats, so whether the last
    point is emitted depends on rounding (faithful to the original logic).
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the estimate."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
| 630 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 tokenizer layer wrapping keras_nlp's BytePairTokenizer.

    NOTE(review): machine-mangled and not runnable as written — ``__init__``
    reuses one parameter name four times (a SyntaxError), binds its arguments
    to throwaway locals instead of the ``self.pad_token_id`` /
    ``self.max_length`` / ``self.vocab`` / ``self.merges`` /
    ``self.tf_tokenizer`` attributes read below, and the three classmethods
    all share the name ``a`` so only the last definition survives. Confirm
    intent against the upstream transformers TFGPT2Tokenizer.
    """
    def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ):
        """Store vocab/merges/padding config and build the BPE tokenizer."""
        super().__init__()
        _lowerCAmelCase : Union[str, Any] = pad_token_id
        _lowerCAmelCase : List[Any] = max_length
        _lowerCAmelCase : Tuple = vocab
        _lowerCAmelCase : str = merges
        _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        """Alternate constructor from an existing GPT2Tokenizer instance."""
        _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()]
        _lowerCAmelCase : Any = tokenizer.get_vocab()
        return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        """Alternate constructor from a pretrained checkpoint name/path."""
        _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ )
        return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ )
    @classmethod
    def a ( cls , snake_case__ ):
        """Alternate constructor from a get_config()-style dict."""
        return cls(**snake_case__ )
    def a ( self ):
        """Keras serialization hook: return the constructor kwargs."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def a ( self , snake_case__ , snake_case__ = None ):
        """Tokenize a batch of strings; pad to max_length when configured."""
        _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ )
        _lowerCAmelCase : str = tf.ones_like(snake_case__ )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length
            if max_length is not None:
                _lowerCAmelCase : str = pad_model_inputs(
                    snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 719 |
"""Lazy import structure for TrOCR (transformers ``__init__`` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# BUG FIX: the structure dict was bound to a throwaway name, the torch list
# was never inserted into it, and the _LazyModule call below read the
# undefined name `_import_structure`.
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Standard transformers pattern: replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 0 |
"""Playfair cipher: encode/decode digraphs against a 5x5 key table.

BUG FIX: every function was defined under the single name ``lowercase`` while
the bodies called ``generate_table``/``prepare_input``/``chunker``, loop
targets and row/column variables were collapsed to one identifier (so the
same-row/same-column tests compared a value with itself), and parameters were
replaced by undefined names. Restored the canonical algorithm.
"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive ``size``-length tuples from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, pad duplicate pairs and odd length with X."""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # a digraph may not contain a doubled letter; split it with X
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table (as a flat 25-item list) from ``key``."""
    # I/J share a cell, hence the 25-letter alphabet without J.
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt ``ciphertext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 720 |
"""Check whether a string can be rearranged into a palindrome.

BUG FIX: both checkers were defined under the single name ``lowercase`` with
bodies referencing undefined identifiers (``input_str``,
``lower_case_input_str``, ``character_freq_dict``), and the ``__main__``
block called ``benchmark``/``check_str`` which were never bound. Names
restored from the call sites (the ``timeit`` setup strings reference them).
"""
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based check: at most one character may have an odd count.

    Spaces are ignored and the comparison is case-insensitive.
    """
    return sum(c % 2 for c in Counter(input_str.replace(' ', '').lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Manual frequency-dict variant of the same check."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(' ', '').lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            # more than one odd-count character -> no palindrome arrangement
            return False
    return True


def benchmark(check_str: str = "") -> None:
    """Time both implementations on ``check_str`` and print the results."""
    print('\nFor string = ', check_str, ':')
    print(
        '> can_string_be_rearranged_as_palindrome_counter()', '\tans =', can_string_be_rearranged_as_palindrome_counter(check_str), '\ttime =', timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)', setup='import __main__ as z', ), 'seconds', )
    print(
        '> can_string_be_rearranged_as_palindrome()', '\tans =', can_string_be_rearranged_as_palindrome(check_str), '\ttime =', timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)', setup='import __main__ as z', ), 'seconds', )


if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 0 |
"""Build and measure a GHZ (fully entangled) state on ``qubits`` qubits."""
import qiskit


def quantum_entanglement(qubits: int = 2):
    """Return the measurement counts of a GHZ circuit run on the Aer simulator.

    BUG FIX: the function was named ``lowercase`` while the ``__main__`` block
    called ``quantum_entanglement``; the body read undefined names
    (``qubits``/``circuit``/``job``), and the CNOT target was the qubit
    *count* rather than qubit ``i`` (out of range for the register).
    """
    # one classical bit per qubit to record the measurements
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate to entangle qubit i with its predecessor
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f'''Total count for various states are: {quantum_entanglement(3)}''')
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# BUG FIX: the logger and the archive map were bound to the same throwaway
# name, so the first binding was immediately shadowed.
logger = logging.get_logger(__name__)

# NOTE(review): upstream names this DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm.
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for a data2vec text model (BERT/RoBERTa-style fields).

    BUG FIX: ``__init__`` previously reused one parameter name for every
    argument (a SyntaxError) and bound each value to a throwaway local
    instead of an instance attribute, so none of these settings was ever
    stored. Parameter names are restored from the right-hand sides of the
    original assignments.
    """

    # NOTE(review): upstream field is `model_type` — confirm before renaming.
    __magic_name__ = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for data2vec text models."""

    @property
    def a ( self ):
        """Return the ONNX input-name -> dynamic-axes mapping.

        BUG FIX: the axes dict was previously bound to a throwaway local
        while the code below read the undefined name ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            # multiple-choice inputs carry an extra choice dimension
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 630 | 0 |
"""Graph m-coloring via backtracking.

BUG FIX: all three functions were defined under the single name
``SCREAMING_SNAKE_CASE_`` (shadowing each other) while the bodies called
``valid_coloring`` and ``util_color``, and every parameter/local was
collapsed to one undefined placeholder. Names restored from the call sites.
"""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of a vertex uses ``color``."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex ``index`` onward; backtrack on failure."""
    # Base Case: every vertex has been colored
    if index == len(graph):
        return True
    # Recursive Step: try each color in turn
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring (one color index per vertex) or [] if impossible.

    ``graph`` is an adjacency matrix with 1 marking an edge.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 631 |
'''Diagnostic script: print Python/OS/Torch/transformers environment info.'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): the upstream script assigns this string to an environment
# variable (likely a TF log-level setting); as written it is bound to an
# unused name — confirm against the original before relying on it.
_A : Dict ='''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: report None when torch is not installed.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is likewise optional.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 1 |
"""Real and reactive power of an AC load from apparent power and power factor.

BUG FIX: both functions were defined under one name with duplicated parameter
names (a SyntaxError) and bodies referencing the undefined identifiers
``apparent_power``/``power_factor``; restored with descriptive names.
"""
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real power P = S * pf.

    :raises ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2).

    :raises ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 631 |
"""Lazy import structure for TrOCR (transformers ``__init__`` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# BUG FIX: the structure dict was bound to a throwaway name, the torch list
# was never inserted into it, and the _LazyModule call below read the
# undefined name `_import_structure`.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Standard transformers pattern: replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''Catalan numbers via the iterative product formula.'''
def SCREAMING_SNAKE_CASE_ (number) -> int:
    """Return the ``number``-th Catalan number (1-indexed: f(1) == 1, f(5) == 14).

    BUG FIX: the original checked ``isinstance(x, x)`` (always True for a
    type-less argument) and built/raised its error messages with the
    undefined name ``number``.

    :raises TypeError: if ``number`` is not an int.
    :raises ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    current_number = 1
    # C(i) = C(i-1) * (4i - 2) / (i + 1), kept exact with integer division
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''Audio Spectrogram Transformer (AST) model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# BUG FIX: the logger and the archive map were bound to the same throwaway
# name, shadowing the logger.
logger = logging.get_logger(__name__)

# NOTE(review): upstream names this AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}


class _lowercase ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer.

    BUG FIX: the class previously inherited from its own (not-yet-defined)
    name — a NameError at class creation; the intended base is the
    ``PretrainedConfig`` imported above. ``__init__`` also reused one
    parameter name for every argument (a SyntaxError) and bound each value
    to a throwaway local instead of the instance attribute.
    """

    # NOTE(review): upstream field is `model_type` — confirm before renaming.
    a = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

# BUG FIX: all three bindings below shared one throwaway name, so
# `List[Prediction]` read an undefined identifier. The first alias must be
# named `Prediction` (the next line references it); the plural name follows
# the upstream convention — confirm.
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(_lowercase )
class _lowercase ( _lowercase ):
    """Object-detection pipeline: image in, scored/labelled bounding boxes out.

    NOTE(review): this block appears machine-mangled and is not runnable as
    written — the class inherits from its own (not-yet-defined) name, every
    helper method shares the name ``lowerCamelCase_`` (shadowing), and most
    methods bind values to a throwaway local while later lines read the
    original identifiers (``postprocess_kwargs``, ``image``, ``inputs``,
    ``target_size``, ...). Also, ``dict_items + dict_items`` in __init__
    raises TypeError in Python 3. Docstrings describe the evident intent;
    confirm against the upstream transformers ObjectDetectionPipeline.
    """
    def __init__( self: str , *UpperCamelCase__: str , **UpperCamelCase__: int ):
        """Validate framework/backends and restrict to detection-capable models."""
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , """vision""" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: Any ):
        """Split pipeline kwargs: only `threshold` is a postprocess parameter."""
        lowerCamelCase__ : Any = {}
        if "threshold" in kwargs:
            lowerCamelCase__ : Dict = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
    def __call__( self: int , *UpperCamelCase__: str , **UpperCamelCase__: int ):
        """Delegate to the base Pipeline call machinery."""
        return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] ):
        """Preprocess: load the image, record its size, run the processor/tokenizer."""
        lowerCamelCase__ : List[str] = load_image(UpperCamelCase__ )
        lowerCamelCase__ : Dict = torch.IntTensor([[image.height, image.width]] )
        lowerCamelCase__ : Tuple = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            lowerCamelCase__ : Any = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        lowerCamelCase__ : int = target_size
        return inputs
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Any ):
        """Forward: run the model, carrying target_size (and bbox) through."""
        lowerCamelCase__ : Optional[int] = model_inputs.pop("""target_size""" )
        lowerCamelCase__ : Optional[int] = self.model(**UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            lowerCamelCase__ : Optional[Any] = model_inputs["""bbox"""]
        return model_outputs
    def lowerCamelCase_ ( self: str , UpperCamelCase__: int , UpperCamelCase__: Optional[int]=0.9 ):
        """Postprocess: convert logits/boxes into [{score, label, box}, ...]."""
        lowerCamelCase__ : Optional[int] = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            lowerCamelCase__ , lowerCamelCase__ : Any = target_size[0].tolist()
            def unnormalize(UpperCamelCase__: Tuple ):
                # boxes come back normalized to a 1000x1000 grid; rescale to pixels
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ] ) )
            lowerCamelCase__ , lowerCamelCase__ : Any = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            lowerCamelCase__ : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            lowerCamelCase__ : List[str] = [unnormalize(UpperCamelCase__ ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            lowerCamelCase__ : Union[str, Any] = ["""score""", """label""", """box"""]
            lowerCamelCase__ : Dict = [dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) for vals in zip(scores.tolist() , UpperCamelCase__ , UpperCamelCase__ ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            lowerCamelCase__ : str = self.image_processor.post_process_object_detection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Tuple = raw_annotations[0]
            lowerCamelCase__ : str = raw_annotation["""scores"""]
            lowerCamelCase__ : List[Any] = raw_annotation["""labels"""]
            lowerCamelCase__ : List[str] = raw_annotation["""boxes"""]
            lowerCamelCase__ : Any = scores.tolist()
            lowerCamelCase__ : Tuple = [self.model.config.idalabel[label.item()] for label in labels]
            lowerCamelCase__ : List[str] = [self._get_bounding_box(UpperCamelCase__ ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            lowerCamelCase__ : Dict = ["""score""", """label""", """box"""]
            lowerCamelCase__ : Optional[int] = [
                dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
    def lowerCamelCase_ ( self: str , UpperCamelCase__: "torch.Tensor" ):
        """Convert an [xmin, ymin, xmax, ymax] tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = box.int().tolist()
        lowerCamelCase__ : Optional[Any] = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_A : List[str] ='''examples/'''
_A : Any ={
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_A : int ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_A : int ='''README.md'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : List[str] = f.read()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = REPLACE_PATTERNS[pattern]
lowerCamelCase__ : Dict = replace.replace("""VERSION""" , UpperCamelCase )
lowerCamelCase__ : str = re_pattern.sub(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(UpperCamelCase )
def update_version_in_examples(version):
    """Update the `check_min_version` pin in every example script.

    Walks PATH_TO_EXAMPLES, skipping folders with non-actively maintained
    examples, and rewrites each ``.py`` file via `update_version_in_file`.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Propagate `version` to every tracked file (and the examples unless this
    is a patch release, whose examples pin an older minor version)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main`-branch doc links with stable ones in the README model list.

    Fixes a minification defect: the body opened/wrote ``UpperCamelCase``, an
    undefined name — the intended target is the README_FILE constant — and
    wrote back ``UpperCamelCase`` instead of the edited ``lines``.
    """
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the main __init__ and parse it."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: compute the default release
    version (bump minor, or micro for a patch), confirm it with the user, then
    write it everywhere and clean the README."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(f'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps: move back to a dev version
    (next minor + ``.dev0``), confirm with the user, and write it everywhere."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Restored names: minification bound the parser and parsed args to `_A`
    # while the following lines read `parser` / `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
    """Slow integration checks for XLM-RoBERTa hidden states.

    Restores distinct, unittest-discoverable ``test_*`` method names: both
    methods had been minified to the same name ``lowerCamelCase_``, so the
    first was shadowed and neither was collected by unittest.
    """

    @slow
    def test_xlm_roberta_base( self ):
        """`xlm-roberta-base` must reproduce the reference fairseq activations."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )

    @slow
    def test_xlm_roberta_large( self ):
        """`xlm-roberta-large` must reproduce the reference fairseq activations."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        expected_output_shape = torch.Size((1, 12, 1_024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 631 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that one DDPM and one DDIM training step produce identical results.

    Restores names destroyed by minification: the helper was called as
    ``self.get_model_optimizer(resolution=32)`` but defined under another name
    with a mismatched parameter, and the final assertions compared undefined
    names instead of the per-scheduler outputs.
    """

    def get_model_optimizer( self , resolution=32 ):
        """Return a freshly seeded small UNet and an SGD optimizer over it."""
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def test_training_step_equality( self ):
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # Last-iteration tensors from both schedulers must match.
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 631 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map from ALBERT checkpoint name to the URL of its hosted `config.json`.
# NOTE(review): minification renamed this constant to `_A`; upstream it is
# `ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`. Nothing in the visible code reads it.
_A : str ={
    '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
    '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
    '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
    '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
    '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
    '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
    '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
    '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """ALBERT model configuration.

    Restores the ``__init__`` parameter names: minification had renamed every
    parameter to the same identifier (a SyntaxError) while the body still read
    the original names. The base class is the imported ``PretrainedConfig``
    (the previous self-referential base was minification residue).
    """

    # NOTE(review): upstream this attribute is `model_type`; the minified name
    # `a` is kept to avoid guessing at framework-level consumers.
    a = """albert"""

    def __init__( self , vocab_size=30_000 , embedding_size=128 , hidden_size=4_096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16_384 , inner_group_num=1 , hidden_act="""gelu_new""" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="""absolute""" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class _lowercase ( OnnxConfig ):
    """ONNX export configuration for ALBERT.

    The base class is the imported ``OnnxConfig`` (the previous
    self-referential base was minification residue), and the property is named
    ``inputs`` as required by the ``OnnxConfig`` interface.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic input axes for ONNX export, per task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 631 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Turn-around times under Highest Response Ratio Next (HRRN) scheduling.

    Restores the parameter names (the minified def had four identical
    parameter names — a SyntaxError — while the body read the originals) and
    the function name used by the ``__main__`` block below.

    Note: `arrival_time` is sorted in place.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump the clock forward if the earliest unfinished process has not arrived.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time of each process: turn-around time minus burst time.

    Parameter names restored (the minified def repeated one name four times,
    a SyntaxError); the function name matches the ``__main__`` call site.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run of HRRN scheduling. Restored variable names: minification had
    # bound every value to `_A` while the calls below read the real names.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Restored names: the Node/AStar classes below read HEURISTIC, grid and delta,
# which minification had collapsed to the unused `_A`.
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
# Positions are (y, x) pairs.
TPosition = tuple[int, int]
class Node:
    """A search node on the grid, ordered by total cost ``f = g + h``.

    Restored names: the minified ``__init__`` repeated one parameter name six
    times (a SyntaxError), ``__lt__`` read an undefined ``other``, and ``AStar``
    below instantiates this class as ``Node``.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, Euclidean otherwise."""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Open lists are kept sorted by total cost.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``delta`` moves.

    Restored names: the minified ``__init__`` repeated one parameter name
    (SyntaxError) and the class/method names are those the code itself uses
    (``self.retrace_path`` is called inside ``search``; ``BidirectionalAStar``
    builds ``AStar`` instances and calls ``get_successors``).
    """

    def __init__(self, start: TPosition, goal: TPosition):
        # Node takes (pos_x, pos_y, ...) while positions are (y, x) tuples.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the goal, or ``[start]`` if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                # NOTE(review): membership tests rely on default identity
                # comparison because Node defines no __eq__ — confirm intended.
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """In-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the forward path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two ``AStar`` searches expanding toward each other.

    Restored names: the minified ``__init__`` repeated one parameter name
    (SyntaxError); ``search`` calls ``self.retrace_bidirectional_path`` and the
    ``__main__`` block constructs ``BidirectionalAStar``.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate expansions until the frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting point
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Restored names: minification bound every value to `_A` while later lines
    # read `init`, `goal`, `a_star`, etc.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the bidirectional search itself is never invoked here, so
    # only construction is timed — confirm whether `.search()` was intended.
    bd_end_time = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 631 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (percent) of each letter in typical English text.
# NOTE(review): minification renamed this table to `_A` (which is immediately
# reassigned below), so nothing in this module reads it — upstream it is
# presumably `english_letter_freq`; confirm before relying on it.
_A : Union[str, Any] ={
    '''E''': 12.70,
    '''T''': 9.06,
    '''A''': 8.17,
    '''O''': 7.51,
    '''I''': 6.97,
    '''N''': 6.75,
    '''S''': 6.33,
    '''H''': 6.09,
    '''R''': 5.99,
    '''D''': 4.25,
    '''L''': 4.03,
    '''C''': 2.78,
    '''U''': 2.76,
    '''M''': 2.41,
    '''W''': 2.36,
    '''F''': 2.23,
    '''G''': 2.02,
    '''Y''': 1.97,
    '''P''': 1.93,
    '''B''': 1.29,
    '''V''': 0.98,
    '''K''': 0.77,
    '''J''': 0.15,
    '''X''': 0.15,
    '''Q''': 0.10,
    '''Z''': 0.07,
}
# Restored names: the functions below read ETAOIN and LETTERS, which
# minification had collapsed to the unused `_A`.
# English letters ordered from most to least frequent.
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
# Plain A-Z alphabet used to index letter counts.
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase A-Z letter in `message`.

    Restores the parameter name (the body read an undefined ``message``) and
    the function name that `get_frequency_order` calls. Uses
    ``string.ascii_uppercase`` (identical to the module's LETTERS constant) so
    the function has no dependency on the minification-damaged global.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in string.ascii_uppercase:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    """Sort-key helper: return the first element of a pair.

    Restores the parameter name — the minified body read an undefined ``x``.
    """
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in `message`.

    Ties within a frequency bucket are broken by reverse ETAOIN order (rarer
    English letters first). Restores names lost to minification: the function
    name (called by `english_freq_match_score`), the ``letter`` appended to
    each bucket, and the ``reverse=True`` / ``key=get_item_at_index_zero``
    sort arguments.
    """
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    # Within each frequency bucket, sort by descending ETAOIN position.
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq])

    # Highest observed frequency first.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score 0-12: how well `message`'s letter frequencies match English.

    One point for each of English's six most common letters appearing in the
    message's top six, and each of the six least common appearing in the
    bottom six. Restores the parameter name and the call to
    `get_frequency_order` lost to minification.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run any ``>>>`` examples embedded in the docstrings above.
    import doctest
    doctest.testmod()
| 631 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs and inputs for the model tests below.

    Restores names lost to minification: the class is instantiated as
    ``ViTMAEModelTester`` in ``setUp``, the ``__init__`` repeated one parameter
    name nineteen times (a SyntaxError), and the test suite calls the method
    names used here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common test suite for ViTMAE.

    Restores the class-level attribute names the mixins read: minification had
    bound every value to the single name ``a`` (so only the last assignment
    survived) and replaced the mixin base classes with self-references.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Dict ):
pass
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
lowerCamelCase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Any = [*signature.parameters.keys()]
lowerCamelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = pt_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
lowerCamelCase__ : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
model.to(UpperCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
# Make sure we don't have nans
lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
    # The following common-mixin tests are intentionally disabled: ViTMAE draws a fresh
    # random mask (and ids_restore) on every forward pass, so output-comparison tests
    # would be nondeterministic. The save/load test above reseeds the RNG instead.
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
 to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
 to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
 to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the COCO test-fixture image used by the integration tests below.

    Defect fixed: the function was defined as `SCREAMING_SNAKE_CASE_` but the
    integration test calls it as `prepare_img()`.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Integration tests running the real `facebook/vit-mae-base` checkpoint."""

    @cached_property
    def default_image_processor( self ):
        # Renamed from the obfuscated `lowerCamelCase_`: the test below reads
        # `self.default_image_processor`. Returns None when vision deps are absent.
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self ):
        """A forward pass of the pretrained model must reproduce known logits.

        Defects fixed: `model`, `inputs`, `noise`, `expected_shape` and
        `expected_slice` were read but never bound in the obfuscated version.
        """
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 631 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Return a 1-D float tensor unique to this process: rank i holds [i*n+1, ..., i*n+n].

    A gather over all processes therefore yields exactly 1..n**2. Defect fixed:
    the parameter was named `UpperCamelCase` while the body read `state`, and the
    def name did not match the `create_tensor(...)` call sites below.
    """
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state):
    """Gathered per-process tensors must concatenate to 1..num_processes**2."""
    # Defect fixed: def/param names (`SCREAMING_SNAKE_CASE_`/`UpperCamelCase`) did not
    # match the `test_gather(state)` call in main() or the `state` reads in the body.
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state):
    """gather_object must collect one Python object per process, in rank order."""
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    # Defect fixed: the length assertion previously measured the *input* object
    # (always length 1) instead of the gathered result.
    assert len(gathered_obj ) == state.num_processes, f'''{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def test_broadcast(state):
    """After broadcast, every process must hold rank 0's tensor (1..num_processes)."""
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state):
    """Shorter per-process tensors must be right-padded with zeros to the longest length."""
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state):
    """reduce(..., "sum") must elementwise-add the two processes' tensors ([1,2]+[3,4]=[4,6])."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """sum""" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def test_reduce_mean(state):
    """reduce(..., "mean") must elementwise-average the two processes' tensors ([1,2],[3,4]->[2,3])."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """mean""" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # NOTE(review): `_mp_fn` is the entry-point name xla_spawn looks up by convention —
    # confirm against the launcher before relying on it.
    main()
def main():
    """Run every distributed-ops smoke test on the current process group."""
    # Defect fixed: the state was bound to `lowerCamelCase__` while every following
    # line read `state`, and the test helpers were called with the undefined name
    # `UpperCamelCase`.
    state = PartialState()
    state.print(f'''State: {state}''' )
    state.print("""testing gather""" )
    test_gather(state )
    state.print("""testing gather_object""" )
    test_gather_object(state )
    state.print("""testing broadcast""" )
    test_broadcast(state )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(state )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(state )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(state )


if __name__ == "__main__":
    main()
| 631 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( AbstractArchiveFileSystem ):
    """Read-only fsspec filesystem exposing one compressed file as a single uncompressed file.

    Defects fixed: the class inherited from itself (`_lowercase` was undefined at
    class-creation time -> NameError on import); the four class attributes were all
    named `a`, so `self.compression` below could never resolve; none of the
    instance attributes (`self.file`, `self.compressed_name`, ...) were ever set.
    """

    root_marker = """"""
    protocol: str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="""rb""" , protocol=target_protocol , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("""::""" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("""/""" )

    def _get_dirs( self ):
        """Populate the single-entry directory cache lazily."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            self.dir_cache = {f["""name"""]: f}

    def cat( self , path: str ):
        """Return the whole decompressed payload as bytes."""
        return self.file.open().read()

    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        """Open the (only) file for binary reading; any mode other than 'rb' is rejected."""
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    """bz2-compressed single-file filesystem."""

    # Defect fixed: all three attributes were named `a`, so only the last assignment
    # survived and the base class could not read `protocol`/`compression`/`extension`.
    protocol = """bz2"""
    compression = """bz2"""
    extension = """.bz2"""
class _lowercase ( _lowercase ):
    """gzip-compressed single-file filesystem."""

    # Defect fixed: all three attributes were named `a` (self-shadowing).
    protocol = """gzip"""
    compression = """gzip"""
    extension = """.gz"""
class _lowercase ( _lowercase ):
    """lz4-compressed single-file filesystem."""

    # Defect fixed: all three attributes were named `a` (self-shadowing).
    protocol = """lz4"""
    compression = """lz4"""
    extension = """.lz4"""
class _lowercase ( _lowercase ):
    """xz-compressed single-file filesystem."""

    # Defect fixed: all three attributes were named `a` (self-shadowing).
    protocol = """xz"""
    compression = """xz"""
    extension = """.xz"""
class _lowercase ( _lowercase ):
    """zstd-compressed single-file filesystem, with a workaround for fsspec/zstandard.

    Defects fixed: the three class attributes were all named `a`; `_enter`,
    `WrappedFile` and `self._file` were read but never bound; the patched
    `fixed_enter` was never installed on `self.file.__enter__`.
    """

    protocol = """zstd"""
    compression = """zstd"""
    extension = """.zst"""

    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy that owns __enter__/__exit__ but delegates everything else to the wrapped file."""

            def __init__( self , file_ ):
                self._file = file_

            def __enter__( self ):
                self._file.__enter__()
                return self

            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )

            def __iter__( self ):
                return iter(self._file )

            def __next__( self ):
                return next(self._file )

            def __getattr__( self , attr ):
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
| 631 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Defect fixed: the logger was bound to `_A` while the config class below calls
# `logger.info(...)`. The archive-map name follows the HF convention for config files.
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class _lowercase ( _lowercase ):
a = """conditional_detr"""
a = ["""past_key_values"""]
a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self: Optional[int] , UpperCamelCase__: int=True , UpperCamelCase__: str=None , UpperCamelCase__: Any=3 , UpperCamelCase__: Dict=300 , UpperCamelCase__: int=6 , UpperCamelCase__: Optional[int]=2_048 , UpperCamelCase__: Tuple=8 , UpperCamelCase__: List[str]=6 , UpperCamelCase__: Optional[Any]=2_048 , UpperCamelCase__: List[str]=8 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Tuple="relu" , UpperCamelCase__: Any=256 , UpperCamelCase__: int=0.1 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: List[Any]=1.0 , UpperCamelCase__: Union[str, Any]=False , UpperCamelCase__: Dict="sine" , UpperCamelCase__: Dict="resnet50" , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: Dict=2 , UpperCamelCase__: Union[str, Any]=5 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: Dict=1 , UpperCamelCase__: List[str]=1 , UpperCamelCase__: Union[str, Any]=2 , UpperCamelCase__: Optional[Any]=5 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=0.25 , **UpperCamelCase__: Tuple , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase__ : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[Any] = config_class.from_dict(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = use_timm_backbone
lowerCamelCase__ : str = backbone_config
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : int = num_queries
lowerCamelCase__ : Dict = d_model
lowerCamelCase__ : Optional[Any] = encoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : int = encoder_attention_heads
lowerCamelCase__ : str = decoder_ffn_dim
lowerCamelCase__ : Optional[Any] = decoder_layers
lowerCamelCase__ : List[str] = decoder_attention_heads
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Optional[Any] = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Any = init_std
lowerCamelCase__ : Tuple = init_xavier_std
lowerCamelCase__ : Dict = encoder_layerdrop
lowerCamelCase__ : Any = decoder_layerdrop
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : str = auxiliary_loss
lowerCamelCase__ : List[str] = position_embedding_type
lowerCamelCase__ : List[Any] = backbone
lowerCamelCase__ : Tuple = use_pretrained_backbone
lowerCamelCase__ : int = dilation
# Hungarian matcher
lowerCamelCase__ : List[str] = class_cost
lowerCamelCase__ : str = bbox_cost
lowerCamelCase__ : Union[str, Any] = giou_cost
# Loss coefficients
lowerCamelCase__ : str = mask_loss_coefficient
lowerCamelCase__ : List[str] = dice_loss_coefficient
lowerCamelCase__ : Optional[int] = cls_loss_coefficient
lowerCamelCase__ : Optional[Any] = bbox_loss_coefficient
lowerCamelCase__ : str = giou_loss_coefficient
lowerCamelCase__ : Tuple = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self: Any ):
return self.d_model
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase__ : List[Any] = self.backbone_config.to_dict()
lowerCamelCase__ : Union[str, Any] = self.__class__.model_type
return output
class _lowercase ( _lowercase ):
a = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self: Optional[Any] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowerCamelCase_ ( self: int ):
return 1e-5
@property
def lowerCamelCase_ ( self: Optional[Any] ):
return 12
| 631 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Defect fixed: the logger was bound to `_A` while load_orig_config_file calls `logger.error`.
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original YAML training config as a flat `argparse.Namespace`.

    Nested YAML mappings are flattened into dotted keys ("model.classification.name").
    Defects fixed: the def name did not match the `load_orig_config_file(...)` call
    site, and every local (`items`, `new_key`, `config`, `cfg`, `flat_cfg`) was
    bound to an obfuscated name while read under its real name.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(orig_cfg_file , """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file , str(exc ) ) )
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for `task_name`, merging values from the original YAML config.

    Defects fixed: duplicate `UpperCamelCase` parameters (SyntaxError) and config
    fields that were computed but never attached to the config object.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` (mutates `dct` in place).

    Defect fixed: the three parameters were all named `UpperCamelCase`
    (duplicate argument names are a SyntaxError), and the popped value was
    never written back under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map every original-checkpoint key to its HuggingFace MobileViTV2 key.

    Returns a list of (old_key, new_key) pairs; `base_model=True` drops the
    "mobilevitv2." prefix used by task-specific heads. Defects fixed: the two
    parameters shared the name `UpperCamelCase` (SyntaxError) and every rewritten
    key was bound to an obfuscated name while the code read `k_new`.
    """
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            # Stages 3/4/5 contain 2/4/3 transformer layers respectively.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop the auxiliary segmentation head's weights, which the HF model does not use.

    Defect fixed: the collected keys were never appended under the name the pop
    loop read, and `pop` was called with an undefined default.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check conversions.

    Defect fixed: the body read the undefined names `UpperCamelCase` for both the
    URL and the `stream=` flag; the def name did not match the `prepare_img()` call.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to a HuggingFace model and save it.

    Defects fixed: all four parameters were named `UpperCamelCase` (SyntaxError);
    `model`, `checkpoint`, `logits`, `image_processor` etc. were read but never
    bound under those names.
    """
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Defect fixed: the parser and parsed args were bound to `_A`, so the
    # `parser.add_argument(...)` and `args.*` reads below raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
                - MobileViTV2 (256x256) : imagenet1k_256
                - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                  imagenet21k_to_1k_256
                - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                  ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
                - ADE20K Dataset : ade20k_deeplabv3
                - Pascal VOC 2012 Dataset: voc_deeplabv3
        '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Defect fixed: all eight constants were bound to `_A` (each assignment shadowing
# the previous one) while the tests below read ARTICLES, SUMMARIES, MBART_TINY, etc.
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = """\n""".join(UpperCamelCase )
Path(UpperCamelCase ).open("""w""" ).writelines(UpperCamelCase )
def make_test_data_dir(tmp_dir):
    """Populate `tmp_dir` with {train,val,test}.{source,target} fixture files and return it.

    Defect fixed: the def was named `SCREAMING_SNAKE_CASE_` while the test calls
    `make_test_data_dir(tmp_dir=...)` — both the function name and the keyword
    parameter name are pinned by that call site.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class _lowercase ( _lowercase ):
    """Tests for the seq2seq example dataset utilities (SeqaSeqDataset,
    LegacySeqaSeqDataset, pack_data_dir and the samplers).

    NOTE(review): this block came through an identifier scrambler and cannot
    run as written — the base class name `_lowercase` is unresolved, method
    parameters are all called `UpperCamelCase__` while the bodies read the
    original names (`tok_name`, `tokenizer`, `max_src_len`, `trunc_target`,
    `ds`, `batch_sampler`, `orig_examples`, `packed_examples`, ...), most
    locals are bound to `lowerCamelCase__` but later read under their
    original names, two nested/utility defs declare duplicate parameter
    names (a SyntaxError), and the annotated tuple assignment below
    (`lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = ...`) is not
    valid Python. Only comments are added here; the code is untouched.
    """

    # Batching/truncation smoke test for SeqaSeqDataset across tiny checkpoints.
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
        lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
        lowerCamelCase__ : int = 4
        lowerCamelCase__ : List[str] = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
        lowerCamelCase__ : List[str] = SeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
        lowerCamelCase__ : Optional[int] = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            lowerCamelCase__ : Optional[Any] = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch
    # Same truncation checks against the legacy dataset implementation.
    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
        lowerCamelCase__ : Dict = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
        lowerCamelCase__ : Dict = 4
        lowerCamelCase__ : str = LegacySeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=20 , max_target_length=UpperCamelCase__ , )
        lowerCamelCase__ : Tuple = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch
    # pack_data_dir should merge the two fixture lines into one packed example.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
        lowerCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        lowerCamelCase__ : Union[str, Any] = tmp_dir.joinpath("""train.source""" ).open().readlines()
        lowerCamelCase__ : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 128 , UpperCamelCase__ )
        lowerCamelCase__ : Any = {x.name for x in tmp_dir.iterdir()}
        lowerCamelCase__ : Optional[int] = {x.name for x in save_dir.iterdir()}
        lowerCamelCase__ : int = save_dir.joinpath("""train.source""" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 1
        assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
        assert orig_paths == new_paths
    # fairseq-style dynamic batch sampler should vary batch size within the token budget.
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
    def lowerCamelCase_ ( self: Tuple ):
        if not FAIRSEQ_AVAILABLE:
            return
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = self._get_dataset(max_len=64 )
        lowerCamelCase__ : Optional[int] = 64
        lowerCamelCase__ : Tuple = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = [len(UpperCamelCase__ ) for x in batch_sampler]
        assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
        lowerCamelCase__ : Optional[Any] = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
        lowerCamelCase__ : Tuple = []
        lowerCamelCase__ : Any = []
        for batch in data_loader:
            lowerCamelCase__ : Optional[int] = batch["""input_ids"""].shape
            lowerCamelCase__ : Optional[Any] = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            lowerCamelCase__ : str = np.product(batch["""input_ids"""].shape )
            num_src_per_batch.append(UpperCamelCase__ )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(UpperCamelCase__ )
        assert num_src_per_batch[0] == max(UpperCamelCase__ )
        if failures:
            raise AssertionError(F'''too many tokens in {len(UpperCamelCase__ )} batches''' )
    # Sortish sampler should produce fewer pad tokens than the naive loader.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self._get_dataset(max_len=512 )
        lowerCamelCase__ : Union[str, Any] = 2
        lowerCamelCase__ : Tuple = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
        lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
        lowerCamelCase__ : int = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = tokenizer.pad_token_id
        def count_pad_tokens(UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int]="input_ids" ):
            return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) ) < sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) )
        assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
        assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
    # Helper: build (dataset, max_tokens, tokenizer) from real WMT data or the bundled fixture.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict=1_000 , UpperCamelCase__: List[Any]=128 ):
        if os.getenv("""USE_REAL_DATA""" , UpperCamelCase__ ):
            lowerCamelCase__ : str = """examples/seq2seq/wmt_en_ro"""
            lowerCamelCase__ : Any = max_len * 2 * 64
            if not Path(UpperCamelCase__ ).joinpath("""train.len""" ).exists():
                save_len_file(UpperCamelCase__ , UpperCamelCase__ )
        else:
            lowerCamelCase__ : List[str] = """examples/seq2seq/test_data/wmt_en_ro"""
            lowerCamelCase__ : Optional[Any] = max_len * 4
            save_len_file(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = SeqaSeqDataset(
            UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
        return ds, max_tokens, tokenizer
    # Distributed sortish sampler shards for rank 0 and rank 1 must not overlap.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self._get_dataset()
        lowerCamelCase__ : Optional[Any] = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
        lowerCamelCase__ : str = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
        assert idsa.intersection(UpperCamelCase__ ) == set()
    # dataset_kwargs should carry tokenizer-specific kwargs (lang codes / prefix space).
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] , )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
        lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
        if tok_name == MBART_TINY:
            lowerCamelCase__ : Dict = SeqaSeqDataset(
                UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
            lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            lowerCamelCase__ : Dict = SeqaSeqDataset(
                UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
            lowerCamelCase__ : Optional[int] = train_dataset.dataset_kwargs
        assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0
| 631 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    """Config-tester hook (scrambled MobileViTV2 config tester): builds a
    config from the tester's stored kwargs and checks it exposes the
    MobileViTV2-specific ``width_multiplier`` field."""

    def lowerCamelCase_ ( self: Any ):
        # Build the config under test from the stored constructor kwargs.
        config = self.config_class(**self.inputs_dict )
        # NOTE(review): the original called hasattr() on the undefined name
        # `UpperCamelCase__`; the freshly built config is the intended target.
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )


# The common-test class below instantiates this by its conventional name.
MobileViTVaConfigTester = _lowercase
class _lowercase :
    """Builds MobileViTV2 configs and synthetic inputs for the model tests
    below (scrambled model-tester class).

    NOTE(review): scrambling left this class non-runnable as written —
    `__init__` declares every parameter under the single name
    `UpperCamelCase__` (a duplicate parameter name, i.e. a SyntaxError) yet
    its body reads `parent`, `batch_size`, `width_multiplier`, ..., and it
    only ever binds the local `lowerCamelCase__` instead of the
    `self.batch_size`/`self.num_channels`/... attributes that the other
    methods read. Only comments are added here; the code is untouched.
    """

    # Store the test hyper-parameters used to build configs and inputs.
    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout
    # Create (config, pixel_values, labels, pixel_labels) fixtures.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    # Build a MobileViTVaConfig from the stored hyper-parameters.
    def lowerCamelCase_ ( self: List[Any] ):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    # Forward the bare model and check the feature-map shape.
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Image-classification head: logits should be (batch, num_labels).
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Segmentation head: logits should be (batch, num_labels, H/stride, W/stride),
    # with and without labels.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Repackage the fixtures for the common-test mixin.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-API test suite for the MobileViTV2 models (scrambled
    ModelTesterMixin/PipelineTesterMixin subclass).

    NOTE(review): scrambling broke this class — the four class attributes
    are all assigned to the single name `a`, so each assignment clobbers
    the previous one (they were originally distinct attributes such as the
    model-class tuple, the pipeline mapping and boolean flags), and method
    bodies read names (`model`, `arg_names`, `divisor`, `hidden_states`,
    `inputs_dict`, ...) that are only ever bound to the scrambled local
    `lowerCamelCase__`. Only comments are added here; the code is untouched.
    """

    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    # Wire up the model tester and the config tester.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    # Delegate the generic config checks.
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    # forward() should take `pixel_values` as its first (non-self) argument.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    # Bare-model forward shape check via the tester.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Hidden states: 5 feature maps whose spatial size halves at each stage.
    def lowerCamelCase_ ( self: List[str] ):
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # Classification-head shape check via the tester.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    # Segmentation-head shape check via the tester.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    # Smoke-test loading the first published checkpoint.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Load the COCO fixture image used by the slow integration tests below."""
    # NOTE(review): the original bound the image to a scrambled local name
    # (`lowerCamelCase__`) but then returned the undefined name `image`.
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests for pretrained MobileViTV2 checkpoints.

    NOTE(review): scrambling left these tests non-runnable — locals are
    bound to `lowerCamelCase__` but read under their original names
    (`model`, `outputs`, `logits`, `segmentation`, `image_processor`, ...),
    and the device argument was replaced by the undefined name
    `UpperCamelCase__` (presumably `torch_device` — confirm against the
    imports above). Only comments are added here; the code is untouched.
    """

    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    # Classification logits on the COCO fixture should match the recorded slice.
    @slow
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    # Segmentation logits on the COCO fixture should match the recorded slice.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    # post_process_semantic_segmentation should honour target_sizes.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): `logging` here is the transformers logging shim (relative
# import above), not the stdlib module; this `_A` binding is immediately
# clobbered by the archive-map assignment on the next line.
_A : Optional[int] =logging.get_logger(__name__)
_A : Optional[int] ={
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( _lowercase ):
    """BERT model configuration (scrambled ``BertConfig``).

    NOTE(review): the original ``__init__`` declared every parameter under
    the single name `UpperCamelCase__` (a duplicate parameter name, i.e. a
    SyntaxError) while its body read the canonical BERT hyper-parameter
    names and bound them to a throwaway local instead of ``self``. The
    signature below is reconstructed from those body reads plus the
    positional defaults, which match the published bert-base defaults.
    """

    a = """bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Forward the padding id and any extra kwargs to the base config.
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # Store the hyper-parameters on the instance (assignment order kept
        # from the original body).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowercase ( _lowercase ):
    """ONNX export configuration for BERT: describes the dynamic axes of the
    model inputs (scrambled ``BertOnnxConfig``)."""

    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # NOTE(review): the original bound the axis map to a scrambled local
        # (`lowerCamelCase__`) but then read the undefined name
        # `dynamic_axis`; restore the intended local name.
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 631 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): `logging` here is the transformers logging shim (relative
# import above), not the stdlib module; this `_A` binding is immediately
# clobbered by the assignments on the following lines.
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast GPT-NeoX-20B tokenizer (scrambled ``GPTNeoXTokenizerFast``).

    NOTE(review): scrambling left this class non-runnable as written — the
    four class attributes are all bound to the single name `a` (each
    assignment clobbers the previous), both ``__init__`` and the save
    method declare duplicate `UpperCamelCase__` parameter names (a
    SyntaxError), and the method bodies read names (`pre_tok_state`,
    `add_prefix_space`, `pre_tok_class`, `input_ids`, `conversation`) that
    are only bound to scrambled locals or never bound at all. Only
    comments are added here; the code is untouched.
    """

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]
    # Build the fast tokenizer and sync `add_prefix_space` into the
    # backend pre-tokenizer state.
    def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
        lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
            lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
            lowerCamelCase__ : Dict = add_prefix_space
            lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
        lowerCamelCase__ : Dict = add_prefix_space
    # Persist the backend model's vocabulary files.
    def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
        lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
    # Flatten a Conversation into ids, truncated to model_max_length.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
        lowerCamelCase__ : str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
        if len(UpperCamelCase__ ) > self.model_max_length:
            lowerCamelCase__ : int = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def SCREAMING_SNAKE_CASE_ (flax_key_tuple , flax_tensor ):
    """Rename a flax parameter key tuple and reorient its tensor for PyTorch.

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError); the key-tuple / tensor roles are restored from the body.
    Returns the (possibly renamed) key tuple and the (possibly permuted)
    tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: 3-D kernels become `weight` with the last two axes swapped
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer: 2-D kernels become transposed `weight`
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor


# The shard-splitting routine below calls this by its conventional name.
rename_base_flax_keys = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ (layer , checkpoint_info , switch_checkpoint_path ):
    """Split a tensorstore checkpoint key into (real layer name, key parts,
    content).

    NOTE(review): the original declared three identical parameter names (a
    SyntaxError); the roles — the flat key string, the checkpoint-info
    mapping, and the checkpoint directory — are restored from the body.
    ``kvstore/path`` entries are rewritten to absolute paths and
    ``kvstore/driver`` entries are forced to the ``file`` driver.
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


# The shard-splitting routine below calls this by its conventional name.
get_key_and_tensorstore_dict = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : Tuple = rename_keys(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = {}
for k, v in current_block.items():
lowerCamelCase__ : int = v
lowerCamelCase__ : Optional[Any] = new_current_block
torch.save(UpperCamelCase , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = WEIGHTS_NAME ) -> Tuple:
lowerCamelCase__ : Any = convert_file_size_to_int(UpperCamelCase )
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Optional[Any] = 0
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
lowerCamelCase__ : List[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
lowerCamelCase__ : Dict = flatten_dict(UpperCamelCase , sep="""/""" )
lowerCamelCase__ : Optional[int] = {}
for layer in checkpoint_info.keys():
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = get_key_and_tensorstore_dict(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : List[Any] = content
else:
lowerCamelCase__ : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : Union[str, Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : str = torch.tensor(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , UpperCamelCase )
lowerCamelCase__ : List[str] = """/""".join(UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : str = os.path.join(
UpperCamelCase , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(UpperCamelCase , UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : Tuple = {}
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[int] = raw_weights.to(getattr(UpperCamelCase , UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : Optional[int] = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(UpperCamelCase , UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : List[Any] = {}
for idx, shard in enumerate(UpperCamelCase ):
lowerCamelCase__ : List[str] = weights_name.replace(
""".bin""" , f'''-{idx+1:05d}-of-{len(UpperCamelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase__ : Any = shard
for key in shard:
lowerCamelCase__ : Tuple = shard_file
# Add the metadata
lowerCamelCase__ : Optional[Any] = {"""total_size""": total_size}
lowerCamelCase__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ : Optional[Any] = json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + """\n"""
f.write(UpperCamelCase )
return metadata, index
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_A : Any =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : Tuple = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
lowerCamelCase__ : Dict = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
lowerCamelCase__ : str = TaTokenizer.from_pretrained("""t5-small""" )
lowerCamelCase__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase , return_tensors="""pt""" ).input_ids
lowerCamelCase__ : int = model.generate(UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 631 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Dict ={
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : Optional[int] ={
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int =['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] =['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Optional[int] ='''tf'''
else:
_A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
a = MarianTokenizer
a = False
a = True
def lowerCamelCase_ ( self: List[str] ):
super().setUp()
lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = """</s>"""
lowerCamelCase__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCamelCase_ ( self: List[str] ):
# fmt: off
lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
lowerCamelCase__ : str = """Tämä on testi"""
lowerCamelCase__ : Any = """This is a test"""
lowerCamelCase__ : int = [76, 7, 2_047, 2]
lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
_A : Optional[int] ={
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """rwkv"""
a = {"""max_position_embeddings""": """context_length"""}
def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ):
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Optional[Any] = context_length
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : int = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase__ : List[str] = layer_norm_epsilon
lowerCamelCase__ : int = rescale_every
lowerCamelCase__ : Optional[int] = use_cache
lowerCamelCase__ : Dict = bos_token_id
lowerCamelCase__ : Any = eos_token_id
super().__init__(
tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : int =logging.get_logger(__name__)
_A : Any ={
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """xmod"""
def __init__( self: Optional[Any] , UpperCamelCase__: int=30_522 , UpperCamelCase__: Any=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Dict=12 , UpperCamelCase__: Tuple=3_072 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Dict=512 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: int=1 , UpperCamelCase__: Any=0 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: int="absolute" , UpperCamelCase__: Dict=True , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Any=2 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: int=True , UpperCamelCase__: int=True , UpperCamelCase__: str=("en_XX",) , UpperCamelCase__: str=None , **UpperCamelCase__: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : List[str] = position_embedding_type
lowerCamelCase__ : List[str] = use_cache
lowerCamelCase__ : Any = classifier_dropout
lowerCamelCase__ : Tuple = pre_norm
lowerCamelCase__ : Union[str, Any] = adapter_reduction_factor
lowerCamelCase__ : Dict = adapter_layer_norm
lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Optional[int] = ln_before_adapter
lowerCamelCase__ : Any = list(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = default_language
class _lowercase ( _lowercase ):
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
if self.task == "multiple-choice":
lowerCamelCase__ : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__ : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)
_A : int ={
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """roc_bert"""
def __init__( self: Optional[Any] , UpperCamelCase__: Any=30_522 , UpperCamelCase__: Optional[Any]=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: Tuple=3_072 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Dict=512 , UpperCamelCase__: str=2 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Tuple=1e-12 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[Any]="absolute" , UpperCamelCase__: Any=None , UpperCamelCase__: Any=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Union[str, Any]=768 , UpperCamelCase__: int=910 , UpperCamelCase__: Tuple=512 , UpperCamelCase__: int=24_858 , UpperCamelCase__: Optional[Any]=True , **UpperCamelCase__: Optional[Any] , ):
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Tuple = type_vocab_size
lowerCamelCase__ : Optional[Any] = layer_norm_eps
lowerCamelCase__ : List[Any] = use_cache
lowerCamelCase__ : Tuple = enable_pronunciation
lowerCamelCase__ : Union[str, Any] = enable_shape
lowerCamelCase__ : Union[str, Any] = pronunciation_embed_dim
lowerCamelCase__ : Any = pronunciation_vocab_size
lowerCamelCase__ : int = shape_embed_dim
lowerCamelCase__ : Tuple = shape_vocab_size
lowerCamelCase__ : Optional[Any] = concat_input
lowerCamelCase__ : str = position_embedding_type
lowerCamelCase__ : Dict = classifier_dropout
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( _lowercase ):
@staticmethod
@abstractmethod
def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def lowerCamelCase_ ( self: List[Any] ):
raise NotImplementedError()
| 631 |
'''simple docstring'''
import sys
import turtle
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
_A : Any =turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
_A : Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
a = 42
# setable values
a = 42
a = 42
a = None
@classmethod
def lowerCamelCase_ ( cls: Tuple , UpperCamelCase__: CommonSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: jnp.ndarray ):
return cls(common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ )
@dataclass
class _lowercase ( _lowercase ):
a = 42
class _lowercase ( _lowercase , _lowercase ):
    """Flax DDPM-style noise scheduler (functional state passed explicitly).

    NOTE(review): this class has been mechanically renamed -- results are
    bound to the throwaway local `lowerCamelCase__` but read back under their
    original names (`common`, `state`, `sample`, ...), several signatures
    repeat the parameter name `UpperCamelCase__` (a SyntaxError), and the
    annotated tuple assignment below is also invalid syntax.  The control
    flow matches diffusers' FlaxDDPMScheduler; the names must be restored
    before this can run -- confirm against the upstream implementation.
    """

    # Compatible scheduler names; the second `a` (a dataclass-field
    # placeholder) shadows the first.
    a = [e.name for e in FlaxKarrasDiffusionSchedulers]
    a = 42

    @property
    def lowerCamelCase_ ( self: Tuple ):
        # `has_state`-style flag: scheduler state lives in an external object.
        return True

    @register_to_config
    def __init__( self: Tuple , UpperCamelCase__: int = 1_000 , UpperCamelCase__: float = 0.0_001 , UpperCamelCase__: float = 0.02 , UpperCamelCase__: str = "linear" , UpperCamelCase__: Optional[jnp.ndarray] = None , UpperCamelCase__: str = "fixed_small" , UpperCamelCase__: bool = True , UpperCamelCase__: str = "epsilon" , UpperCamelCase__: jnp.dtype = jnp.floataa , ):
        # Only the computation dtype is stored here; the remaining arguments
        # are captured into `self.config` by @register_to_config.
        lowerCamelCase__ : Optional[Any] = dtype

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Optional[CommonSchedulerState] = None ):
        # Create the initial scheduler state (a `create_state`-style method).
        # NOTE(review): the body reads `common`, but the parameter is named
        # `UpperCamelCase__` -- the original name was presumably `common`.
        if common is None:
            lowerCamelCase__ : int = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        lowerCamelCase__ : List[Any] = jnp.array(1.0 , dtype=self.dtype )
        # Full training schedule, descending from T-1 to 0.
        lowerCamelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ , )

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: Optional[int] = None ):
        # DDPM does not rescale the model input: return the sample unchanged.
        return sample

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: int , UpperCamelCase__: Tuple = () ):
        # Build the (descending) inference timestep schedule and store it on
        # the state.
        lowerCamelCase__ : int = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        lowerCamelCase__ : Optional[int] = (jnp.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ , )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any=None , UpperCamelCase__: Tuple=None ):
        # Compute the per-timestep variance used when adding noise in `step`.
        lowerCamelCase__ : List[Any] = state.common.alphas_cumprod[t]
        # alpha_bar_{t-1}; defined as 1.0 at t == 0.
        lowerCamelCase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        lowerCamelCase__ : Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            lowerCamelCase__ : str = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            # Clamp to avoid log/sqrt of zero downstream.
            lowerCamelCase__ : str = jnp.clip(UpperCamelCase__ , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            lowerCamelCase__ : str = jnp.log(jnp.clip(UpperCamelCase__ , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            lowerCamelCase__ : Dict = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            lowerCamelCase__ : Any = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            # Model predicts the variance directly.
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min and max log-variance using the model
            # prediction mapped from [-1, 1] to [0, 1].
            lowerCamelCase__ : List[str] = variance
            lowerCamelCase__ : Union[str, Any] = state.common.betas[t]
            lowerCamelCase__ : List[Any] = (predicted_variance + 1) / 2
            lowerCamelCase__ : Tuple = frac * max_log + (1 - frac) * min_log
        return variance

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: int , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: Optional[jax.random.KeyArray] = None , UpperCamelCase__: bool = True , ):
        # One reverse-diffusion step: predict x_{t-1} from the model output
        # at timestep t.  `key` seeds the noise draw; defaults to PRNGKey(0).
        lowerCamelCase__ : Tuple = timestep
        if key is None:
            lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
        # Learned-variance models emit 2x channels: split into mean/variance.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            lowerCamelCase__ , lowerCamelCase__ : Any = jnp.split(UpperCamelCase__ , sample.shape[1] , axis=1 )
        else:
            lowerCamelCase__ : Optional[int] = None
        # 1. compute alphas, betas
        lowerCamelCase__ : Any = state.common.alphas_cumprod[t]
        lowerCamelCase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        lowerCamelCase__ : Any = 1 - alpha_prod_t
        lowerCamelCase__ : Union[str, Any] = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            lowerCamelCase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            lowerCamelCase__ : Dict = model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase__ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                """ for the FlaxDDPMScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            lowerCamelCase__ : Dict = jnp.clip(UpperCamelCase__ , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowerCamelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        lowerCamelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowerCamelCase__ : Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Draw noise and scale it by the (possibly learned) std-dev.
            lowerCamelCase__ : List[str] = jax.random.split(UpperCamelCase__ , num=1 )
            lowerCamelCase__ : List[Any] = jax.random.normal(UpperCamelCase__ , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(UpperCamelCase__ , UpperCamelCase__ , predicted_variance=UpperCamelCase__ ) ** 0.5) * noise
        # No noise is added at the final step (t == 0).
        lowerCamelCase__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        lowerCamelCase__ : int = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase__ , state=UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: jnp.ndarray , ):
        # Forward-diffuse clean samples to the given timesteps (shared helper).
        return add_noise_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: DDPMSchedulerState , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: jnp.ndarray , UpperCamelCase__: jnp.ndarray , ):
        # Compute v-prediction targets (shared helper).
        return get_velocity_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def __len__( self: int ):
        # Length of the full training schedule.
        return self.config.num_train_timesteps
| 631 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Builder for ConvBert configs/inputs used by the TF model test suite.

    NOTE(review): the constructor accepts many parameters but ignores all of
    them, hardcoding every value, and binds everything to the throwaway local
    `lowerCamelCase__` instead of `self`; the attribute reads below
    (`self.batch_size`, ...) therefore fail at runtime.  The layout mirrors
    transformers' TFConvBertModelTester -- confirm against upstream.
    """

    def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
        # Hardcoded test hyperparameters (parent, batch size, model dims, ...).
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Union[str, Any] = 13
        lowerCamelCase__ : Any = 7
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : List[str] = True
        lowerCamelCase__ : str = 99
        lowerCamelCase__ : Dict = 384
        lowerCamelCase__ : Optional[Any] = 2
        lowerCamelCase__ : Optional[int] = 4
        lowerCamelCase__ : Optional[Any] = 37
        lowerCamelCase__ : Union[str, Any] = """gelu"""
        lowerCamelCase__ : int = 0.1
        lowerCamelCase__ : Optional[Any] = 0.1
        lowerCamelCase__ : List[Any] = 512
        lowerCamelCase__ : Optional[Any] = 16
        lowerCamelCase__ : Any = 2
        lowerCamelCase__ : Optional[Any] = 0.02
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : Any = 128
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Optional[Any] = 9
        lowerCamelCase__ : Any = 1
        lowerCamelCase__ : Optional[int] = None

    def lowerCamelCase_ ( self: List[Any] ):
        # Produce random ids/masks/labels plus a ConvBertConfig for the tests.
        lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : str = None
        if self.use_input_mask:
            lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : int = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
        # Base model: check last_hidden_state shape for dict and list inputs.
        lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
        lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__ : List[str] = [input_ids, input_mask]
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        # Masked-LM head: logits over the vocabulary at every position.
        lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        # Sequence-classification head: one logit vector per example.
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
        # Multiple-choice head: tile inputs across the choices dimension.
        lowerCamelCase__ : Optional[int] = self.num_choices
        lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
        lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
        # Token-classification head: per-token label logits.
        lowerCamelCase__ : List[Any] = self.num_labels
        lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
        # Question-answering head: start/end logits per token.
        lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
        lowerCamelCase__ : int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Repackage prepare_config_and_inputs() into the common (config, dict)
        # form used by the shared test mixin.
        lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : str = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """TF ConvBert model test suite (shared-mixin driven).

    NOTE(review): like the rest of this file, locals are bound to the mangled
    name `lowerCamelCase__` but read back under their original names, so the
    test bodies cannot execute as written -- confirm against transformers'
    test_modeling_tf_convbert before relying on them.
    """

    # Model classes exercised by the common tests (empty when TF is absent).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin (all disabled).
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: str ):
        # setUp: build the model tester and the config tester.
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: List[str] ):
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Base-model forward-pass test.
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # Masked-LM head test.
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Multiple-choice head test.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Question-answering head test.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # Sequence-classification head test.
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # Token-classification head test.
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Extended SavedModel round-trip: export with hidden states and
        # attentions enabled, reload, and verify the output structure.
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                # SavedModel exports live under <dir>/saved_model/1.
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                # ConvBert halves the head count via grouped attention,
                # hence num_attention_heads / 2 in the expected shape.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCamelCase_ ( self: int ):
        # Smoke test: pretrained checkpoint loads.
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Verify attention outputs: shapes, counts, and that toggling
        # output_attentions via kwargs or config behaves consistently.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            # Decoder attentions: layer count and per-layer shape.
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            # Encoder (or plain) attentions: layer count and per-layer shape.
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Integration test: run pretrained ConvBert on a tiny input and compare
    a 3x3 slice of the output against stored reference values.

    NOTE(review): results are assigned to the local `lowerCamelCase__` but
    read back as `model`/`output`, so this cannot execute as written.
    """

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Load the pretrained checkpoint (network access; guarded by @slow).
        lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0]
        # Expected output shape: (batch=1, seq_len=6, hidden=768).
        lowerCamelCase__ : Dict = [1, 6, 768]
        self.assertEqual(output.shape , UpperCamelCase__ )
        # Reference slice; compared with absolute tolerance 1e-4.
        lowerCamelCase__ : Dict = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 631 | 1 |
'''simple docstring'''
from torch import nn
class _lowercase ( nn.Module ):
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] ):
super().__init__()
lowerCamelCase__ : Union[str, Any] = class_size
lowerCamelCase__ : Union[str, Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCamelCase__ : List[str] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCamelCase__ : Optional[int] = self.mlp(UpperCamelCase__ )
return logits
| 631 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Dataset wrapper that applies a preprocessing callable to every item.

    ``__getitem__`` fetches the raw item and runs it through ``process``
    with the stored keyword arguments.
    """

    def __init__( self: Optional[Any] , dataset , process , params ):
        """
        Args:
            dataset: underlying indexable dataset.
            process: callable applied to each raw item.
            params: dict of extra keyword arguments forwarded to ``process``.
        """
        # Bug fix: the original duplicated all three parameter names (a
        # SyntaxError) and bound the values to throwaway locals, so
        # __len__/__getitem__ crashed on the missing attributes.
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__( self: List[str] ):
        # Delegate length to the wrapped dataset.
        return len(self.dataset )

    def __getitem__( self: Any , i: int ):
        # Fetch, then preprocess, a single item.
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class _lowercase ( _lowercase ):
    """Iterator that runs an inference callable over a loader, transparently
    unrolling batched outputs back into per-item results.

    NOTE(review): as elsewhere in this file, values are assigned to the
    mangled local `lowerCamelCase__` but read back under their original
    names (`self.loader`, `self._loader_batch_data`, ...), so the class
    cannot run as written -- the attribute assignments need restoring
    (compare transformers' pipelines.pt_utils.PipelineIterator).
    """

    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
        # Store the loader, the inference callable, its kwargs, and the
        # loader batch size (None disables batch unrolling entirely).
        lowerCamelCase__ : int = loader
        lowerCamelCase__ : str = infer
        lowerCamelCase__ : Optional[int] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : int = loader_batch_size
        # Internal bookkeeping
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None

    def __len__( self: Dict ):
        return len(self.loader )

    def __iter__( self: Optional[int] ):
        lowerCamelCase__ : List[Any] = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Any ):
        # `loader_batch_item`: slice one element (as batch_size=1) out of the
        # currently-held batched output, advancing the internal index.
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase__ : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase__ : str = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase__ : List[str] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase__ : str = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
        self._loader_batch_index += 1
        return result

    def lowerCamelCase_ ( self: List[Any] ):
        # `__next__`: yield the next unrolled item, fetching and running a new
        # batch through `infer` when the current one is exhausted.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        lowerCamelCase__ : Optional[Any] = next(self.iterator )
        lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                lowerCamelCase__ : Optional[Any] = processed
            else:
                lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
                lowerCamelCase__ : Any = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : Any = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : List[str] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase__ : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase__ : List[Any] = processed
            lowerCamelCase__ : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens per-item sub-iterators produced by `infer`
    into one stream (chunk pipelines).

    NOTE(review): assignments target the mangled local `lowerCamelCase__`
    while reads use the original attribute names (`self.subiterator`, ...),
    so the class needs its names restored to run.
    """

    def __init__( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any]=None ):
        # Same state as the parent iterator (loader, infer, params,
        # loader_batch_size).
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def __iter__( self: Union[str, Any] ):
        # Reset both the outer iterator and the current sub-iterator.
        lowerCamelCase__ : str = iter(self.loader )
        lowerCamelCase__ : int = None
        return self

    def lowerCamelCase_ ( self: str ):
        # `__next__`: drain the current sub-iterator; on exhaustion, feed the
        # next loader item through `infer` to create a fresh one.
        if self.subiterator is None:
            lowerCamelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            lowerCamelCase__ : Tuple = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            lowerCamelCase__ : Any = self.infer(next(self.iterator ) , **self.params )
            lowerCamelCase__ : Union[str, Any] = next(self.subiterator )
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk results back into lists, using
    each item's `is_last` marker as the group boundary.

    NOTE(review): same mangling caveat as the sibling classes -- assignments
    go to the local `lowerCamelCase__` but reads use the original names.
    """

    def __iter__( self: List[Any] ):
        lowerCamelCase__ : int = iter(self.loader )
        return self

    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase__ : List[str] = False
        lowerCamelCase__ : Union[str, Any] = []
        # First, drain any partially-unrolled batch from a previous call.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase__ : Any = self.loader_batch_item()
                lowerCamelCase__ : Tuple = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
                if is_last:
                    return accumulator
        # Keep running inference until an item flagged `is_last` closes the group.
        while not is_last:
            lowerCamelCase__ : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Infer the observed batch size from the first tensor.
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    lowerCamelCase__ : Dict = processed
                else:
                    lowerCamelCase__ : Dict = list(processed.keys() )[0]
                    lowerCamelCase__ : Dict = processed[key]
                    if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                        lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
                    else:
                        lowerCamelCase__ : Dict = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase__ : str = observed_batch_size
                lowerCamelCase__ : str = processed
                lowerCamelCase__ : Optional[int] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase__ : List[Any] = self.loader_batch_item()
                    lowerCamelCase__ : str = item.pop("""is_last""" )
                    accumulator.append(UpperCamelCase__ )
                    if is_last:
                        return accumulator
            else:
                lowerCamelCase__ : Optional[Any] = processed
                lowerCamelCase__ : Optional[int] = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset view that extracts a single key/column from each item."""

    def __init__( self: Optional[int] , dataset: Dataset , key: str ):
        """
        Args:
            dataset: underlying dataset whose items are mappings.
            key: the key to extract from every item.
        """
        # Bug fix: the original duplicated both parameter names (a
        # SyntaxError) and bound them to throwaway locals, so the accessors
        # below crashed on the missing attributes.
        self.dataset = dataset
        self.key = key

    def __len__( self: Optional[Any] ):
        # Delegate length to the wrapped dataset.
        return len(self.dataset )

    def __getitem__( self: List[str] , i: int ):
        # Bug fix: the index parameter had been renamed away from `i`, which
        # the body still referenced (NameError).
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset view producing ``{"text", "text_pair"}`` dicts from two
    columns of the wrapped dataset (for text-pair pipelines)."""

    def __init__( self: Optional[int] , dataset: Dataset , key_a: str , key_a_pair: str ):
        """
        Args:
            dataset: underlying dataset whose items are mappings.
            key_a: column used for the ``"text"`` field.
            key_a_pair: column used for the ``"text_pair"`` field.
        """
        # Bug fix: the two key parameters had collapsed into one duplicated
        # name (a SyntaxError) and were bound to throwaway locals, losing the
        # second column entirely; keep them distinct and on `self`.
        self.dataset = dataset
        self.key_a = key_a
        self.key_a_pair = key_a_pair

    def __len__( self: str ):
        # Delegate length to the wrapped dataset.
        return len(self.dataset )

    def __getitem__( self: List[str] , i: int ):
        # Bug fix: the index parameter had been renamed away from `i`, which
        # the body still referenced (NameError).
        return {"text": self.dataset[i][self.key_a], "text_pair": self.dataset[i][self.key_a_pair]}
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
# Map of canonical TrOCR checkpoints to their hosted config files.
# NOTE(review): this assignment rebinds ``_A`` and thereby shadows the logger
# created on the previous line — looks like a mangled rename; confirm the
# intended distinct names upstream.
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """Configuration class for the TrOCR text decoder (``model_type="trocr"``).

    Holds the decoder hyper-parameters; the defaults reproduce the
    ``microsoft/trocr-base-handwritten`` architecture.
    """

    # NOTE(review): these three class attributes were all bound to the single
    # name ``a`` (each assignment shadowing the previous one); the standard
    # PretrainedConfig attribute names are restored.
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size: int = 50_265,
        d_model: int = 1_024,
        decoder_layers: int = 12,
        decoder_attention_heads: int = 16,
        decoder_ffn_dim: int = 4_096,
        activation_function: str = "gelu",
        max_position_embeddings: int = 512,
        dropout: float = 0.1,
        attention_dropout: float = 0.0,
        activation_dropout: float = 0.0,
        decoder_start_token_id: int = 2,
        init_std: float = 0.02,
        decoder_layerdrop: float = 0.0,
        use_cache: bool = True,
        scale_embedding: bool = False,
        use_learned_position_embeddings: bool = True,
        layernorm_embedding: bool = True,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        **kwargs,
    ):
        """Build the config.

        Fix: the previous signature declared every parameter under the same
        mangled name (a SyntaxError), so none of the body's assignments could
        resolve; parameter names now match the attributes they populate and
        keep the original positional order and defaults.
        """
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Special-token ids are managed by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : List[Any] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (a, b) -> np.ndarray:
    """Pairwise squared Euclidean distances between rows of ``a`` and ``b``.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 so the whole
    computation is a few vectorized NumPy operations instead of a loop.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        Array of shape (n, m) where entry (i, j) is ||a[i] - b[j]||^2.
    """
    # Fix: the original signature declared the same parameter name twice
    # (a SyntaxError), leaving the two operands indistinguishable.
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    bb = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    return aa[:, None] - 2 * ab + bb[None, :]
def SCREAMING_SNAKE_CASE_ (x, clusters) -> np.ndarray:
    """Assign every pixel of ``x`` to the index of its nearest color cluster.

    Args:
        x: image array whose last axis is 3 (RGB); flattened to (n, 3).
        clusters: palette array of shape (k, 3).

    Returns:
        Flat integer array of length n with the nearest-cluster index per pixel.
    """
    # Fix: both parameters previously shared one mangled name (a SyntaxError).
    x = x.reshape(-1, 3)
    # NOTE(review): ``squared_euclidean_distance`` is defined in this module
    # under a mangled name — confirm the helper binding before shipping.
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _lowercase ( _lowercase ):
    """Image processor for ImageGPT-style models.

    Resizes images, maps pixel values to [-1, 1], and optionally
    color-quantizes pixels against a fixed palette (``clusters``) to produce
    ``input_ids``.
    """

    # NOTE(review): this attribute was mangled to the bare name ``a``; the
    # standard BaseImageProcessor attribute name is restored.
    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        clusters=None,
        do_resize: bool = True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        """Store defaults.

        Fix: the previous signature declared every parameter under one
        mangled name (a SyntaxError) and assigned to locals instead of
        ``self``; three methods below also shared a single mangled name,
        shadowing each other — standard names are restored throughout.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size)
        # Palette is stored as an array so distance math works directly.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize ``image`` to ``size`` (requires explicit height and width)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
        # ``resize`` here is the module-level image_transforms helper.
        return resize(
            image, size=(size["""height"""], size["""width"""]), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image, data_format=None):
        """Map pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch, returning a BatchFeature.

        When ``do_color_quantize`` is on, the output is flattened cluster
        indices under ``input_ids``; otherwise channel-formatted pixels.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # NOTE(review): kept as-is from upstream — `and` binds tighter than
        # `or` here, so a None resample raises even when do_resize is False.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_color_quantize and clusters is None:
            raise ValueError("""Clusters must be specified if do_color_quantize is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            # NOTE(review): ``color_quantize`` is defined in this module under
            # a mangled name — confirm the helper binding before shipping.
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # Convert back to a list of images for consistent behaviour.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""input_ids""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 631 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_ (graph) -> bool:
    """Check whether ``graph`` is bipartite using a 2-coloring DFS.

    Args:
        graph: adjacency list mapping each vertex 0..n-1 to its neighbors
            (list of lists, or an int-keyed dict supporting ``len``).

    Returns:
        True if the vertices split into two sets with no intra-set edge,
        False otherwise.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and push the opposite color to neighbors.
        # Fix: the inner signature repeated one parameter name (a
        # SyntaxError) and the recursion passed the whole graph instead of
        # the neighbor vertex.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component, starting each from color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge joins two same-colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of the demo graph: an even 4-cycle (0-1-2-3) plus the
# isolated vertex 4, so the expected printed result is True.
_A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# Fix: the call previously referenced the undefined names
# ``check_bipartite_dfs`` and ``graph``; this module binds them as
# ``SCREAMING_SNAKE_CASE_`` and ``_A``.
print(SCREAMING_SNAKE_CASE_(_A))
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ (my_list) -> list:
    """Sort a list of numbers with bucket sort.

    One bucket per integer step of the value range; each value falls into
    the bucket of its offset from the minimum, then buckets are sorted and
    concatenated.

    Args:
        my_list: numbers to sort (may be empty).

    Returns:
        A new list with the values in ascending order.
    """
    if len(my_list) == 0:
        return []
    # Fix: the mangled code iterated an undefined ``my_list``, appended the
    # whole input into a bucket, and sorted the whole input per bucket;
    # element/bucket are used correctly now.
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fix: the asserts previously called the undefined name ``bucket_sort``;
    # this module defines the function as ``SCREAMING_SNAKE_CASE_``.
    assert SCREAMING_SNAKE_CASE_([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert SCREAMING_SNAKE_CASE_([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 631 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Map-style dataset that lazily applies ``process`` to each element.

    ``ds[i]`` returns ``process(dataset[i], **params)``.
    """

    def __init__(self, dataset, process, params):
        # Fix: the previous signature repeated one mangled parameter name
        # (a SyntaxError) and stored nothing on ``self``.
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        # Fix: the preprocess callable must receive the fetched item, not
        # the integer index it was previously given.
        processed = self.process(item, **self.params)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that runs ``infer`` over ``loader`` items and optionally
    unrolls batched outputs back into batch_size=1 items.

    When ``loader_batch_size`` is set, each inferred batch is sliced so
    downstream code always sees individual elements.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Fix: the previous signature repeated mangled parameter names
        # (a SyntaxError) and assigned to locals instead of ``self``; the
        # two unbatching methods below were also both defined under one
        # mangled name — standard names are restored.
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Unrolling is pointless for batch_size 1: deactivate it.
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of the stored batch as batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a plain tensor: just fetch the slice.
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict):
            # slice every field at the current index.
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first.
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as tuples of tensors so need
                    # specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around.
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct row but keep it looking like
                    # batch_size=1 for compatibility within transformers.
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # Typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element with the original container class so it
            # still looks like a batch_size=1 output.
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Still unrolling a previously inferred batch.
            return self.loader_batch_item()
        # Out of items within a batch: infer on the next loader item.
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        if self.loader_batch_size is not None:
            # Try to infer the size of the produced batch.
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # Could be the last, smaller batch: don't unroll too far.
                self.loader_batch_size = observed_batch_size
            # Set the internal index to start unrolling this batch.
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # Not unrolling batches.
            return processed

    # Backward-compat alias for the mangled method name used elsewhere.
    lowerCamelCase_ = __next__
class _lowercase ( _lowercase ):
    """Iterator that flattens the sub-iterators produced by ``infer``.

    Each loader item yields a sub-iterator; its elements are emitted one by
    one until exhausted, then the next loader item is consumed — i.e. lists
    of lists are flattened into a single stream, but with generators.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Fix: the previous signature repeated mangled parameter names
        # (a SyntaxError). ``loader_batch_size`` is deliberately not
        # forwarded — the original also passed only three arguments up.
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # First call: build the sub-iterator for the first loader item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item of the current sub-iterator.
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, start looking at the next
            # loader item; ChunkIterator keeps feeding until ALL elements
            # of the loader have produced and drained their sub-iterator.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed

    # Backward-compat alias for the mangled method name used elsewhere.
    lowerCamelCase_ = __next__
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk items back into lists.

    Items are accumulated (possibly while unbatching) until one carries
    ``is_last=True``; the accumulated list is then returned, so ``process``
    and ``postprocess`` see the same boundaries that ``PipelineChunkIterator``
    flattened away.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT every produced item must contain the extra ``is_last`` flag
        # marking the original ``process`` boundary.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Finish unrolling the batch left over from the previous call.
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Infer the batch size actually produced.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # Could be the last, smaller batch: don't unroll as
                    # many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                # Fix: the mangled code popped from an undefined ``item``;
                # the processed output itself is the item here.
                item = processed
                is_last = item.pop("""is_last""")
                accumulator.append(item)
        return accumulator

    # Backward-compat alias for the mangled method name used elsewhere.
    lowerCamelCase_ = __next__
class _lowercase ( _lowercase ):
    """Dataset adapter exposing a single column of the wrapped dataset.

    ``adapter[i]`` returns ``dataset[i][key]`` instead of the full row dict.
    """

    def __init__(self, dataset, key: str):
        # Fix: the previous signature declared both parameters under the same
        # mangled name (a SyntaxError) and never stored them on ``self``.
        self.dataset = dataset
        self.key = key

    def __len__(self):
        # Length is delegated to the wrapped dataset.
        return len(self.dataset)

    def __getitem__(self, i):
        # Fix: the previous code indexed with an undefined name ``i``; the
        # index parameter itself is used now.
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset adapter pairing two columns into a ``text``/``text_pair`` dict.

    ``adapter[i]`` returns
    ``{"text": dataset[i][key_a], "text_pair": dataset[i][key_b]}``.
    """

    def __init__(self, dataset, key_a: str, key_b: str):
        # Fix: the previous signature declared all three parameters under one
        # mangled name (a SyntaxError) and collapsed both keys onto a single
        # attribute; two distinct key attributes are kept now.
        self.dataset = dataset
        self.key_a = key_a
        self.key_b = key_b

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        # Fix: previously indexed with an undefined ``i``.
        return {"text": self.dataset[i][self.key_a], "text_pair": self.dataset[i][self.key_b]}
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _lowercase ( unittest.TestCase ):
    """Cross PT<->TF equivalence tests for the TFAutoModel* factory classes.

    Each test loads a config, builds the TF model from PT weights
    (``from_pt=True``) and the PT model from TF weights (``from_tf=True``),
    asserting both load and have the expected class.

    NOTE(review): throughout these methods the arguments are the unbound
    name ``UpperCamelCase__`` — the original ``model_name`` / expected-class
    references appear to have been mangled away, so the tests cannot run
    as written; restore against the upstream TF auto-model test file.
    """

    # TFAutoModel <-> AutoModel round-trip.
    @slow
    def lowerCamelCase_ ( self: int ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = TFAutoModel.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Dict = AutoModel.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForPreTraining <-> AutoModelForPreTraining round-trip.
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[str] = AutoModelForPreTraining.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForCausalLM <-> AutoModelForCausalLM, also checking
    # output_loading_info.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Any = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : int = TFAutoModelForCausalLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Tuple = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelWithLMHead <-> AutoModelWithLMHead round-trip.
    @slow
    def lowerCamelCase_ ( self: List[Any] ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Dict = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Any = AutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForMaskedLM <-> AutoModelForMaskedLM, also checking
    # output_loading_info.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : int = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Dict = AutoModelForMaskedLM.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : Any = AutoModelForMaskedLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForSeq2SeqLM <-> AutoModelForSeq2SeqLM, also checking
    # output_loading_info.
    @slow
    def lowerCamelCase_ ( self: Tuple ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : int = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            lowerCamelCase__ , lowerCamelCase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
                UpperCamelCase__ , output_loading_info=UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForSequenceClassification round-trip.
    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : str = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : int = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # TFAutoModelForQuestionAnswering round-trip.
    @slow
    def lowerCamelCase_ ( self: str ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Any = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[str] = AutoModelForQuestionAnswering.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    # Tiny-model parameter-count check after a PT->TF->PT round-trip.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__ ) , 14_410 )
        lowerCamelCase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__ ) , 14_410 )

    # Same parameter-count check for a second tiny checkpoint.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__ ) , 14_410 )
        lowerCamelCase__ : Tuple = AutoModelWithLMHead.from_pretrained(UpperCamelCase__ , from_tf=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase__ ) , 14_410 )
| 631 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): ``_A`` is assigned "3" and never used below — upstream this
# value is written into an environment variable (e.g. a TF log-level knob);
# confirm the intended target of the assignment.
_A : Dict ='''3'''
# Report interpreter and OS details.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are best-effort: absence is reported, not fatal.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers details are likewise best-effort.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : Union[str, Any] =logging.get_logger(__name__)
class _lowercase ( BaseImageProcessor ):
    """BLIP-style image processor: resize to a fixed (height, width), rescale
    pixel values, normalize with CLIP mean/std, optionally convert to RGB.

    Fixed: every parameter was declared as the duplicated name
    ``UpperCamelCase__`` (a SyntaxError), all four methods shared the name
    ``lowerCamelCase_`` (only the last survived), ``self.`` had been stripped
    from the ``__init__`` assignments, and the class inherited from itself.
    Names are reconstructed from the attributes ``preprocess`` reads and from
    the ``BaseImageProcessor`` import above.
    """

    a = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 384, """width""": 384}
        # default_to_square=True — the obfuscation hid this literal; TODO confirm.
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``; ``size`` must provide ``height`` and ``width``."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        output_size = (size["""height"""], size["""width"""])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline over one image or a batch; per-call arguments
        override the defaults configured in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Fixed operator precedence: the original ``do_resize and size is None or
        # resample is None`` raised whenever resample was None, even with
        # do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"""pixel_values""": images}, tensor_type=return_tensors)
        return encoded_outputs
| 631 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import table: submodule name -> list of public names it exports.
# Fixed: the original bound each piece to the recycled ``_A`` name and never
# defined ``_import_structure``, so building the ``_LazyModule`` below raised
# NameError at import time.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
_A : Any = _import_structure
try:
    # The modeling code requires torch.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
    # Register the torch-only symbols so _LazyModule can resolve them.
    _import_structure['''modeling_trocr'''] = _A
if TYPE_CHECKING:
    # Static type checkers get the real (eager) imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    _A : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the recycled ``_A`` name is immediately rebound
# to the checkpoint map below, so the logger binding is lost — verify upstream.
_A : str =logging.get_logger(__name__)
# Canonical RoC-BERT checkpoint name -> hosted config URL.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration for RoC-BERT (a BERT variant with extra pronunciation and
    glyph-shape embeddings).

    Fixed: every ``__init__`` parameter was declared as the duplicated name
    ``UpperCamelCase__`` (a SyntaxError) and the class inherited from itself.
    Parameter names are reconstructed from the attributes assigned in the body;
    the base class comes from the ``PretrainedConfig`` import above.
    """

    a = """roc_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # The parent handles pad_token_id and any remaining kwargs.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the recycled ``_A`` name is immediately rebound
# to the checkpoint map below, so the logger binding is lost — verify upstream.
_A : Union[str, Any] =logging.get_logger(__name__)
# Canonical AST checkpoint name -> hosted config URL.
_A : List[str] ={
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST).

    Fixed: every ``__init__`` parameter was declared as the duplicated name
    ``UpperCamelCase__`` (a SyntaxError) and the class inherited from itself.
    Parameter names are reconstructed from the attributes assigned in the body;
    the base class comes from the ``PretrainedConfig`` import above.
    """

    a = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> float:
    """Return the minimum total cost of merging all files into one (optimal
    merge pattern), where merging two files costs the sum of their sizes.

    Fixed: the original consumed the caller's list in place (popping it down to
    a single element) and rescanned it with ``min``/``index`` every round
    (O(n^2)); this version works on a private heap (O(n log n)) and leaves the
    argument untouched. An empty or single-element input costs 0, as before.
    """
    import heapq  # local import keeps this block self-contained

    if len(UpperCamelCase) < 2:
        return 0
    heap = list(UpperCamelCase)  # copy: do not mutate the caller's list
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        # Always merge the two cheapest files.
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        optimal_merge_cost += merged
        heapq.heappush(heap, merged)
    return optimal_merge_cost
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 631 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_A : List[str] ='''examples/'''
_A : Any ={
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_A : int ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_A : int ='''README.md'''
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Rewrite the version inside ``fname`` using the regex/replacement pair
    registered under ``pattern`` in ``REPLACE_PATTERNS``.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``update_version_in_file(..., pattern=...)`` elsewhere in
    this file.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the concrete version into the replacement template.
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version: str, examples_dir: str = """examples/""") -> None:
    """Update ``check_min_version`` pins in every maintained example script.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``update_version_in_examples`` below, and it walked its
    *version* argument instead of the examples directory; ``examples_dir``
    defaults to the path the original bound to the recycled ``_A`` constant.
    """
    for folder, directories, fnames in os.walk(examples_dir):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version: str, patch: bool = False) -> None:
    """Update the version everywhere it is pinned: the files in
    ``REPLACE_FILES`` and, unless this is a patch release, the examples.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``global_version_update(..., patch=...)`` below.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list(readme: str = """README.md""") -> None:
    """Strip the ``main/`` segment from model-doc links in the README's
    architecture list, pointing them at the stable docs.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``clean_main_ref_in_model_list`` below, and it opened the
    undefined name ``UpperCamelCase`` inside a zero-argument function;
    ``readme`` defaults to the repo README path.
    """
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(readme, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""", """https://huggingface.co/docs/transformers/model_doc""", )
        index += 1
    with open(readme, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current package version from the ``__init__`` registered in
    ``REPLACE_FILES`` and return it as a ``packaging`` Version object.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``get_version`` below.
    """
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    # Group 0 of the ``init`` pattern captures the quoted version string.
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Compute the release version, confirm it interactively, apply it
    everywhere, and (for non-patch releases) clean the README links.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``pre_release_work(patch=...)`` in the __main__ block.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(f'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Bump to the next ``.dev0`` version (interactive confirmation) and clean
    the README links.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``post_release_work`` in the __main__ block.
    """
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI: --post_release bumps to the next dev version; --patch marks a patch
    # release.  Fixed: the parser and parsed args were bound to the recycled
    # ``_A`` name while the code below read them as ``parser``/``args``.
    parser = argparse.ArgumentParser()
    _A : List[Any] = parser
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    _A : List[str] = args
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 1 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowercase ( unittest.TestCase ):
    """Unit tests for ``transformers.utils.backbone_utils``.

    Fixed: all three test methods shared the obfuscated name
    ``lowerCamelCase_`` (so only the last one was ever collected) and several
    call arguments had been replaced with the undefined name
    ``UpperCamelCase__``; both are reconstructed from the asserted values.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # NOTE(review): ValueError reconstructed for the erased exception type —
        # confirm against backbone_utils, which raises ValueError on bad input.
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 631 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that one epoch of noise-prediction training behaves identically
    under DDPM and DDIM schedulers when everything is seeded the same.

    Fixed: both methods shared the obfuscated name ``lowerCamelCase_`` (so the
    helper shadowed nothing usable) while the test body calls
    ``self.get_model_optimizer(resolution=32)``; the final ``allclose`` calls
    also referenced undefined names, now bound to the last loop iteration's
    tensors.
    """

    def get_model_optimizer(self, resolution: int = 32):
        """Build a small, deterministically-initialized UNet2D + SGD optimizer."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # NOTE(review): clip_sample=True reconstructed for the erased literal —
        # confirm against the upstream diffusers training test.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_start=0.0_001, beta_end=0.02, beta_schedule="""linear""", clip_sample=True, )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_start=0.0_001, beta_end=0.02, beta_schedule="""linear""", clip_sample=True, )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # The two schedulers must have produced identical training steps.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 631 | 1 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via Euclid's recursion.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    being called as ``greatest_common_divisor`` by the cipher class below.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over the 36-symbol alphabet A-Z0-9.

    Fixed: all class attributes were named ``a`` (shadowing each other) while
    the methods read ``self.key_string`` / ``self.modulus`` / ``self.to_int``;
    the modulus lambda's parameter did not match its body; several methods
    shared one obfuscated name; and ``main`` below constructs the class as
    ``HillCipher``, so the class is given that name back.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    # round() reconstructed: make_decrypt_key uses self.to_int to integerize.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        """Store the key matrix (mod 36) and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol of the alphabet to its numeric value."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) numeric value back to its symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (invertible)."""
        # Stdlib gcd matches the file's recursive helper for the non-negative
        # determinant used here, and keeps this class self-contained.
        from math import gcd

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                F'''determinant modular {req_l} of encryption key({det}) '''
                F'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop foreign symbols, pad with the last char to a
        multiple of the key order. (Empty input raises IndexError, as before.)"""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block by block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = """""".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block by block with the inverse key (mod 36)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = """""".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    the __main__ block calls ``main()``; the constructor call already named
    the class ``HillCipher``.
    """
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 631 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Simulate Highest-Response-Ratio-Next scheduling and return the
    turn-around time of each process (ordered by arrival).

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    the __main__ block calls ``calculate_turn_around_time``.
    Note: ``arrival_time`` is sorted in place, as in the original.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Jump the clock to the first unfinished process if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return each process's waiting time: turn-around time minus burst time.

    Fixed: defined under the obfuscated name ``SCREAMING_SNAKE_CASE_`` while
    the __main__ block calls ``calculate_waiting_time``.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run of the HRRN scheduler.  Fixed: every result below was bound to
    # the recycled ``_A`` name while the print loop read the real names
    # (no_of_process, turn_around_time, waiting_time, ...), raising NameError.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# Module logger. NOTE(review): the recycled ``_A`` name is immediately rebound
# to the checkpoint map below, so the logger binding is lost — verify upstream.
_A : int =logging.get_logger(__name__)
# Canonical DPT checkpoint name -> hosted config URL.
_A : Tuple ={
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class _lowercase ( PretrainedConfig ):
    """Configuration for a DPT (Dense Prediction Transformer) model.

    Holds the ViT encoder hyper-parameters plus the neck/head settings used
    for dense prediction and, in hybrid mode, the configuration of the BiT
    convolutional backbone.

    Fixes applied: the original ``__init__`` repeated one placeholder
    parameter name for every argument (a SyntaxError as written) and bound
    every value to a throwaway local instead of ``self``; the parameter and
    attribute names are restored from how each value is consumed. The base
    class is ``PretrainedConfig`` (imported at the top of this file).
    """

    # was collapsed onto ``a`` by obfuscation; read via self.__class__.model_type in to_dict()
    model_type = """dpt"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],  # NOTE: mutable defaults kept to preserve the original interface
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1_024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # Resolve the BiT backbone config from None / dict / config object.
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config.

        Restored name: this overrides ``PretrainedConfig.to_dict`` (the body
        reads/writes ``output`` and ``model_type``, which only makes sense as
        that override).
        """
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 631 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)
_A : int ={
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for a Trajectory Transformer model.

    Fixes applied: the original ``__init__`` repeated one placeholder
    parameter name for every argument (a SyntaxError as written) and bound
    every value to a throwaway local instead of ``self``; the names are
    restored from how each value is consumed (including the ``attribute_map``
    targets below). The original also collapsed the three class attributes
    onto a single name ``a`` so only the last survived; the conventional
    ``PretrainedConfig`` attribute names are restored.
    """

    model_type = """trajectory_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        # special-token ids are forwarded to the PretrainedConfig base.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 631 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Builds tiny ViTMAE configurations and dummy inputs for the tests below.

    NOTE(review): obfuscation artifacts are preserved verbatim -- the
    ``__init__`` signature repeats a single placeholder parameter name (a
    SyntaxError as written) and results are bound to a throwaway local
    (``lowerCamelCase__``) instead of attributes on ``self``; restoring the
    real names requires the original file.
    """

    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        # Record the test hyper-parameters (batch/image/patch geometry and
        # transformer dimensions) -- RHS names are the original parameters.
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    # Builds (config, pixel_values, labels) for a single test case.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels

    # Returns a ViTMAEConfig populated from the tester's hyper-parameters.
    def lowerCamelCase_ ( self: str ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    # Runs the bare ViTMAEModel and checks the hidden-state shape.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Runs ViTMAEForPreTraining (RGB and greyscale) and checks the logit shape.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    # Adapts prepare_config_and_inputs() output to the common-test dict form.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Standard model-tester suite for ViTMAE.

    NOTE(review): the class attributes below were all collapsed onto the
    single name ``a`` by obfuscation (originally all_model_classes,
    pipeline_model_mapping and the test_* flags), so only the last assignment
    survives at runtime; kept verbatim here.
    """

    a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    a = False
    a = False
    a = False
    a = False

    # setUp: build the model tester and the config tester.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )

    # Delegates the generic config checks.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass

    # Input embeddings exist; output embeddings are None or a Linear head.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )

    # forward() must take pixel_values as its first argument.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            lowerCamelCase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Any = [*signature.parameters.keys()]
            lowerCamelCase__ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )

    # Overrides the PT/TF equivalence hook to pin the random mask noise.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
        # make masks reproducible
        np.random.seed(2 )
        lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ : Tuple = pt_noise
        super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    # save/load round-trip with a fixed torch seed so the random mask matches.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
            lowerCamelCase__ : List[str] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
                model.to(UpperCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                # Make sure we don't have nans
                lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
                lowerCamelCase__ : Tuple = 0
                lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(UpperCamelCase__ , 1e-5 )

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Load the COCO sample image used as a fixture by the integration tests."""
    # Fix: the original bound the opened image to a throwaway placeholder and
    # then returned the unbound name ``image`` (NameError at runtime).
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: runs the pretrained facebook/vit-mae-base model
    on a fixture image and checks the logits against recorded values."""

    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        # Default image processor for the checkpoint (None when vision deps are absent).
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.default_image_processor
        lowerCamelCase__ : List[str] = prepare_img()
        lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowerCamelCase__ : List[str] = ViTMAEConfig()
        lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : str = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
    """Unit tests for OwlViTProcessor: save/load round-trips, tokenizer and
    image-processor delegation, text/image/query-image processing and
    batch_decode forwarding.

    NOTE(review): obfuscation artifacts preserved verbatim -- values are bound
    to the throwaway local ``lowerCamelCase__`` while later statements read
    the original variable names.
    """

    # Create a temp dir with a tiny CLIP vocab/merges and an image-processor config.
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
        # fmt: off
        lowerCamelCase__ : int = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        lowerCamelCase__ : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        lowerCamelCase__ : Any = {"""unk_token""": """<unk>"""}
        lowerCamelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCamelCase__ ) )
        lowerCamelCase__ : List[Any] = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        lowerCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCamelCase__ )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(UpperCamelCase__ , UpperCamelCase__ )

    # Slow tokenizer built from the temp fixtures.
    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: Optional[Any] ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ )

    # Fast tokenizer built from the temp fixtures.
    def lowerCamelCase_ ( self: Tuple , **UpperCamelCase__: List[str] ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ )

    # Image processor built from the temp fixtures.
    def lowerCamelCase_ ( self: Tuple , **UpperCamelCase__: Optional[Any] ):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    # tearDown: remove the temp dir.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        shutil.rmtree(self.tmpdirname )

    # One random RGB PIL image for image-processing tests.
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCamelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # Save/load round-trip for both slow- and fast-tokenizer processors.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Optional[int] = self.get_tokenizer()
        lowerCamelCase__ : Dict = self.get_rust_tokenizer()
        lowerCamelCase__ : List[Any] = self.get_image_processor()
        lowerCamelCase__ : List[str] = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        processor_slow.save_pretrained(self.tmpdirname )
        lowerCamelCase__ : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        processor_fast.save_pretrained(self.tmpdirname )
        lowerCamelCase__ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
        self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
        self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )

    # from_pretrained honours overriding kwargs (special tokens, do_normalize).
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowerCamelCase__ : Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase__ )

    # Processor(images=...) must match the underlying image processor output.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ : List[str] = self.get_image_processor()
        lowerCamelCase__ : int = self.get_tokenizer()
        lowerCamelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
        lowerCamelCase__ : Union[str, Any] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
        lowerCamelCase__ : Union[str, Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    # Processor(text=...) must match the underlying tokenizer output.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Optional[Any] = self.get_image_processor()
        lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
        lowerCamelCase__ : List[str] = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Any = """lower newer"""
        lowerCamelCase__ : List[Any] = processor(text=UpperCamelCase__ , return_tensors="""np""" )
        lowerCamelCase__ : Optional[Any] = tokenizer(UpperCamelCase__ , return_tensors="""np""" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

    # text + images together; calling with nothing must raise.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Optional[int] = self.get_image_processor()
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Any = """lower newer"""
        lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
        lowerCamelCase__ : Dict = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    # Flat list of text queries against the real checkpoint's processor.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : List[Any] = """google/owlvit-base-patch32"""
        lowerCamelCase__ : Optional[int] = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Dict = ["""cat""", """nasa badge"""]
        lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ )
        lowerCamelCase__ : Any = 16
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    # Nested (per-image) text queries are padded to batch * max_queries rows.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Any = """google/owlvit-base-patch32"""
        lowerCamelCase__ : Any = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = [["""cat""", """nasa badge"""], ["""person"""]]
        lowerCamelCase__ : Any = processor(text=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = 16
        lowerCamelCase__ : Optional[Any] = len(UpperCamelCase__ )
        lowerCamelCase__ : str = max([len(UpperCamelCase__ ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    # Exact token ids for two known queries.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Dict = """google/owlvit-base-patch32"""
        lowerCamelCase__ : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = ["""cat""", """nasa badge"""]
        lowerCamelCase__ : Dict = processor(text=UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = 16
        lowerCamelCase__ : int = inputs["""input_ids"""]
        lowerCamelCase__ : int = [
            [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )

    # images + query_images for image-guided detection.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Optional[Any] = self.get_image_processor()
        lowerCamelCase__ : List[str] = self.get_tokenizer()
        lowerCamelCase__ : Any = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
        lowerCamelCase__ : int = self.prepare_image_inputs()
        lowerCamelCase__ : List[str] = processor(images=UpperCamelCase__ , query_images=UpperCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    # batch_decode must forward to the tokenizer.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Optional[int] = self.get_image_processor()
        lowerCamelCase__ : Any = self.get_tokenizer()
        lowerCamelCase__ : Dict = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( AbstractArchiveFileSystem ):
    """Read-only fsspec filesystem exposing one compressed file as a single
    uncompressed entry.

    Fixes applied: the original collapsed the four class attributes onto the
    single name ``a`` (so only the last survived), repeated one placeholder
    parameter name in ``__init__``/``_open`` (a SyntaxError as written), and
    bound state to throwaway locals instead of ``self``; the fsspec hook
    names (``_strip_protocol``, ``_get_dirs``, ``cat``, ``_open``) and the
    attribute names are restored from how each value is consumed.
    """

    root_marker = """"""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="""rb""" , protocol=target_protocol , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("""::""" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("""/""" )

    def _get_dirs( self ):
        # Lazily build the single-entry directory cache for the one file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            self.dir_cache = {f["""name"""]: f}

    def cat( self , path: str ):
        # Read and decompress the whole file.
        return self.file.open().read()

    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    """Bzip2-compressed file filesystem."""

    # Fix: the original bound all three overrides to the same name ``a`` so
    # only the last survived; the names come from the base-class attributes
    # they override (protocol / compression / extension).
    protocol = """bz2"""
    compression = """bz2"""
    extension = """.bz2"""
class _lowercase ( _lowercase ):
    """Gzip-compressed file filesystem."""

    # Fix: the original bound all three overrides to the same name ``a`` so
    # only the last survived; the base-class attribute names are restored.
    protocol = """gzip"""
    compression = """gzip"""
    extension = """.gz"""
class _lowercase ( _lowercase ):
    """LZ4-compressed file filesystem."""

    # Fix: the original bound all three overrides to the same name ``a`` so
    # only the last survived; the base-class attribute names are restored.
    protocol = """lz4"""
    compression = """lz4"""
    extension = """.lz4"""
class _lowercase ( _lowercase ):
    # XZ/LZMA-compressed single-file filesystem.
    # NOTE(review): three distinct class attributes (upstream: ``protocol``,
    # ``compression``, ``extension``) were collapsed into ``a``; only the
    # last assignment survives at runtime.
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    # Zstandard-compressed single-file filesystem.
    # NOTE(review): three distinct class attributes (upstream: ``protocol``,
    # ``compression``, ``extension``) were collapsed into ``a``; only the
    # last assignment survives at runtime.
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""

    # NOTE(review): the obfuscated signature declares every parameter with
    # the same name (a SyntaxError); upstream these are
    # (fo, mode, target_protocol, target_options, block_size, **kwargs).
    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        # NOTE(review): upstream binds ``_enter = self.file.__enter__`` here
        # and names the nested class ``WrappedFile``; the obfuscation
        # collapsed both names, so ``_enter``/``WrappedFile`` referenced in
        # ``fixed_enter`` below are unbound as written.
        lowerCamelCase__ : Tuple = self.file.__enter__

        class _lowercase :
            # Delegating wrapper whose ``close`` attribute is writable,
            # unlike the raw zstd decompression reader's.
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                lowerCamelCase__ : Optional[int] = file_

            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )

            def __iter__( self: Any ):
                return iter(self._file )

            def lowerCamelCase_ ( self: List[Any] ):
                return next(self._file )

            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                # Fall through to the wrapped file for everything else.
                return getattr(self._file , UpperCamelCase__ )

        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            # Replacement __enter__ that returns the wrapped file object.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )

        # Monkey-patch the open file's __enter__ with the safe wrapper.
        lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 1 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tests for ``BertJapaneseTokenizer`` with word-level backends (MeCab,
    Sudachi, Jumanpp) plus WordPiece subword splitting.

    NOTE(review): machine obfuscation collapsed distinct local variables into
    ``lowerCamelCase__`` and distinct value references into
    ``UpperCamelCase__``, so many bodies read names that are never bound here
    (``vocab_tokens``, ``tokenizer``, ``tokenizer_new`` …).  Reconcile with
    the upstream test file before executing.
    """

    # NOTE(review): three distinct class attributes were collapsed into ``a``
    # (upstream: the tokenizer class under test plus two boolean test flags).
    a = BertJapaneseTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: int ):
        # Write a minimal WordPiece vocabulary into the test tmp dir.
        super().setUp()
        lowerCamelCase__ : str = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """こんにちは""",
            """こん""",
            """にちは""",
            """ばんは""",
            """##こん""",
            """##にちは""",
            """##ばんは""",
            """世界""",
            """##世界""",
            """、""",
            """##、""",
            """。""",
            """##。""",
        ]
        lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str ):
        # Fixed input/expected-output pair used by round-trip helpers.
        lowerCamelCase__ : int = """こんにちは、世界。 \nこんばんは、世界。"""
        lowerCamelCase__ : Dict = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str ):
        # Encode then decode the fixture text with the given tokenizer.
        lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.get_input_output_texts(UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return text, ids

    def lowerCamelCase_ ( self: Tuple ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: List[str] ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Default word+WordPiece tokenization and id mapping.
        lowerCamelCase__ : int = self.tokenizer_class(self.vocab_file )
        lowerCamelCase__ : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def lowerCamelCase_ ( self: Any ):
        # MeCab backend: tokenize, map ids, and survive a pickle round-trip.
        lowerCamelCase__ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : str = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : Dict = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : int = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # MeCab with the ipadic dictionary.
        lowerCamelCase__ : Any = MecabTokenizer(mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Any ):
        # MeCab with unidic_lite — skip silently when not installed.
        try:
            lowerCamelCase__ : Tuple = MecabTokenizer(mecab_dic="""unidic_lite""" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: List[Any] ):
        # MeCab with unidic — skip silently when not installed.
        try:
            lowerCamelCase__ : Optional[int] = MecabTokenizer(mecab_dic="""unidic""" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # MeCab with lowercasing enabled ("iPhone" -> "iphone").
        lowerCamelCase__ : Any = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def lowerCamelCase_ ( self: Any ):
        # MeCab with an explicit jumandic system dictionary path.
        try:
            lowerCamelCase__ : Optional[int] = MecabTokenizer(
                do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    def lowerCamelCase_ ( self: int ):
        # MeCab without text normalization keeps the full-width space.
        lowerCamelCase__ : int = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="""ipadic""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Sudachi backend: tokenize, map ids, and survive a pickle round-trip.
        lowerCamelCase__ : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : int = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : List[Any] = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_sudachi
    def lowerCamelCase_ ( self: List[str] ):
        # Sudachi preserves whitespace tokens by default.
        lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Split mode A: shortest units.
        lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: int ):
        # Split mode B: middle-sized units.
        lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: Tuple ):
        # Split mode C: longest units.
        lowerCamelCase__ : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )

    @require_sudachi
    def lowerCamelCase_ ( self: Any ):
        # Sudachi with lowercasing enabled.
        lowerCamelCase__ : List[Any] = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Any ):
        # Sudachi without normalization keeps the full-width space token.
        lowerCamelCase__ : Dict = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )

    @require_sudachi
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Sudachi with whitespace trimming drops whitespace tokens entirely.
        lowerCamelCase__ : str = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="""core""" )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Optional[int] ):
        # Jumanpp backend: tokenize, map ids, and survive a pickle round-trip.
        lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = """こんにちは、世界。\nこんばんは、世界。"""
        lowerCamelCase__ : Any = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
        with open(UpperCamelCase__ , """wb""" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , """rb""" ) as handle:
            lowerCamelCase__ : Any = pickle.load(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_jumanpp
    def lowerCamelCase_ ( self: Dict ):
        # Jumanpp default behaviour keeps full-width spaces as tokens.
        lowerCamelCase__ : List[Any] = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Any ):
        # Jumanpp with lowercasing enabled.
        lowerCamelCase__ : Optional[int] = JumanppTokenizer(do_lower_case=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Without normalization, half-width katakana stays decomposed.
        lowerCamelCase__ : Any = JumanppTokenizer(normalize_text=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: Optional[int] ):
        # Whitespace trimming removes the \u3000 tokens.
        lowerCamelCase__ : Union[str, Any] = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )

    @require_jumanpp
    def lowerCamelCase_ ( self: List[str] ):
        # Emoticon-like ASCII art is kept as a single token.
        lowerCamelCase__ : int = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )

    def lowerCamelCase_ ( self: Dict ):
        # Stand-alone WordPiece tokenizer over a tiny vocab.
        lowerCamelCase__ : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        lowerCamelCase__ : Optional[int] = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowerCamelCase__ : List[Any] = i
        lowerCamelCase__ : Any = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
        self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
        self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # SentencePiece-backed subword tokenizer loaded from the Hub.
        lowerCamelCase__ : str = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
        lowerCamelCase__ : str = tokenizer.subword_tokenizer
        lowerCamelCase__ : str = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
        lowerCamelCase__ : str = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # build_inputs_with_special_tokens adds [CLS]/[SEP] markers.
        lowerCamelCase__ : Any = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
        lowerCamelCase__ : Union[str, Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : Dict = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCamelCase__ : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tests for ``BertJapaneseTokenizer`` with character-level subword
    tokenization.

    NOTE(review): obfuscation collapsed local-variable names into
    ``lowerCamelCase__`` and references into ``UpperCamelCase__``; several
    bodies read names that are never bound here (e.g. ``vocab_tokens``).
    """

    # NOTE(review): two distinct class attributes were collapsed into ``a``.
    a = BertJapaneseTokenizer
    a = False

    def lowerCamelCase_ ( self: Optional[int] ):
        # Write a single-character vocabulary into the test tmp dir.
        super().setUp()
        lowerCamelCase__ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: int ):
        # Factory forcing the character subword tokenizer.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str ):
        # Fixed input/expected-output pair for round-trip helpers.
        lowerCamelCase__ : str = """こんにちは、世界。 \nこんばんは、世界。"""
        lowerCamelCase__ : List[str] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: int ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: str ):
        pass  # TODO add if relevant

    def lowerCamelCase_ ( self: List[Any] ):
        # Character tokenization and id mapping through the full tokenizer.
        lowerCamelCase__ : List[str] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
        lowerCamelCase__ : List[Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
        self.assertListEqual(
            UpperCamelCase__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Stand-alone CharacterTokenizer over a tiny vocab.
        lowerCamelCase__ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        lowerCamelCase__ : Optional[Any] = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowerCamelCase__ : Union[str, Any] = i
        lowerCamelCase__ : Union[str, Any] = CharacterTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
        self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )

    def lowerCamelCase_ ( self: int ):
        # build_inputs_with_special_tokens adds [CLS]/[SEP] markers.
        lowerCamelCase__ : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
        lowerCamelCase__ : List[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : int = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__ )
        lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves the Japanese checkpoint to the
    expected tokenizer class.

    NOTE(review): obfuscation collapsed the local names — upstream this
    asserts ``isinstance(tokenizer, BertJapaneseTokenizer)``; as written,
    ``UpperCamelCase__`` is unbound inside the method.
    """

    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Optional[Any] = """cl-tohoku/bert-base-japanese"""
        lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
class _lowercase ( unittest.TestCase ):
    """Loading a checkpoint with a mismatched tokenizer class must log a
    warning (in both directions: BertTokenizer on a Japanese checkpoint and
    BertJapaneseTokenizer on an English one)."""

    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = """cl-tohoku/bert-base-japanese"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
        lowerCamelCase__ : Union[str, Any] = """bert-base-cased"""
        with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
            BertJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertTrue(
                cm.records[0].message.startswith(
                    """The tokenizer class you load from this checkpoint is not the same type as the class this function"""
                    """ is called from.""" ) )
| 631 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    """Load a MobileViTV2 YAML config file and return it as a flat
    ``argparse.Namespace`` whose attribute names are dotted key paths.

    NOTE(review): obfuscation collapsed local names (``items``, ``new_key``,
    ``config``, ``config_dict``, ``flat_cfg`` …) into ``lowerCamelCase__``
    and the inner function's parameters into duplicate ``UpperCamelCase``
    names (a SyntaxError); reconcile with the upstream conversion script
    before running.  Requires the third-party ``yaml`` package.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ):
        # Recursively flatten nested mappings, joining keys with ``sep``.
        lowerCamelCase__ : Optional[int] = []
        for k, v in d.items():
            lowerCamelCase__ : Optional[int] = parent_key + sep + k if parent_key else k
            if isinstance(UpperCamelCase , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(UpperCamelCase , UpperCamelCase , sep=UpperCamelCase ).items() )
            else:
                items.append((new_key, v) )
        return dict(UpperCamelCase )

    lowerCamelCase__ : Any = argparse.Namespace()
    with open(UpperCamelCase , """r""" ) as yaml_file:
        try:
            lowerCamelCase__ : int = yaml.load(UpperCamelCase , Loader=yaml.FullLoader )
            lowerCamelCase__ : Tuple = flatten_yaml_as_dict(UpperCamelCase )
            for k, v in flat_cfg.items():
                setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase , str(UpperCamelCase ) ) )
    return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    """Build a ``MobileViTVaConfig`` for the given task name from the
    original YAML config.

    NOTE(review): obfuscation collapsed distinct assignment targets
    (upstream these set ``config.num_labels``, ``config.image_size``,
    ``filename``, ``is_segmentation_model`` …) into ``lowerCamelCase__``
    and declared both parameters with the same name (a SyntaxError).
    ``task_name``/``orig_cfg_file`` referenced below are the upstream
    parameter names — reconcile before running.
    """
    lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig()
    lowerCamelCase__ : str = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        lowerCamelCase__ : Optional[Any] = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : int = 384
        else:
            lowerCamelCase__ : Optional[int] = 256
        lowerCamelCase__ : str = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        lowerCamelCase__ : Tuple = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : str = 384
        else:
            lowerCamelCase__ : Any = 256
        lowerCamelCase__ : int = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        lowerCamelCase__ : Dict = 151
        lowerCamelCase__ : str = 512
        lowerCamelCase__ : List[Any] = """ade20k-id2label.json"""
        lowerCamelCase__ : Union[str, Any] = True
    elif task_name.startswith("""voc_""" ):
        lowerCamelCase__ : Tuple = 21
        lowerCamelCase__ : Optional[int] = 512
        lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json"""
        lowerCamelCase__ : Tuple = True
    # orig_config
    lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase )
    assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    lowerCamelCase__ : Tuple = """huggingface/label-files"""
    lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
    lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
    lowerCamelCase__ : int = idalabel
    lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_(dct, old_key, new_key):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    Fix: the obfuscated original declared all three parameters with the
    same name (a SyntaxError) and the second statement assigned the popped
    value to a throwaway local instead of re-inserting it under the new
    key, losing the entry entirely.  Restore the upstream behaviour.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple:
    """Build the list of ``(old_key, new_key)`` pairs that map original
    MobileViTV2 state-dict keys to HF ``MobileViTVa*`` names.

    NOTE(review): obfuscation collapsed the locals (upstream:
    ``model_prefix``, ``rename_keys``, ``k_new``) into ``lowerCamelCase__``
    and the parameters into duplicate names; ``state_dict``/``base_model``
    referenced below are the upstream parameter names — reconcile before
    running.
    """
    if base_model:
        lowerCamelCase__ : Optional[int] = """"""
    else:
        lowerCamelCase__ : Optional[Any] = """mobilevitv2."""
    lowerCamelCase__ : List[str] = []
    for k in state_dict.keys():
        # Drop the original "encoder." prefix before applying rules.
        if k[:8] == "encoder.":
            lowerCamelCase__ : Optional[Any] = k[8:]
        else:
            lowerCamelCase__ : Optional[Any] = k
        # Generic sub-module renames.
        if ".block." in k:
            lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' )
        # Plain MobileNet-style stages 1-2.
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        # MobileViT stages 3-5: downsampling + local representation convs.
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        # Transformer blocks inside each MobileViT stage; j_in enumerates
        # the per-stage transformer layer indices, j_in[-1]+1 is the final
        # layernorm slot.
        for i in [3, 4, 5]:
            if i == 3:
                lowerCamelCase__ : int = [0, 1]
            elif i == 4:
                lowerCamelCase__ : str = [0, 1, 2, 3]
            elif i == 5:
                lowerCamelCase__ : Dict = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    lowerCamelCase__ : List[Any] = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    lowerCamelCase__ : Optional[int] = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        # Attention / FFN sub-layer renames.
        if "pre_norm_attn.0." in k:
            lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        # Heads.
        if "classifier.1." in k:
            lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def SCREAMING_SNAKE_CASE_(state_dict):
    """Remove auxiliary-segmentation-head weights from ``state_dict`` in
    place (the HF model has no ``seg_head.aux_head``).

    Fix: the obfuscated original bound the accumulator list to a throwaway
    name (leaving ``keys_to_ignore`` unbound) and called
    ``state_dict.pop(k, k)`` with the key as its own default; restore the
    upstream collect-then-pop behaviour with a ``None`` default.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE_ () -> Dict:
    """Download and return the standard COCO test image used for output
    verification.

    NOTE(review): obfuscation collapsed the locals — upstream the URL is
    bound to ``url`` and the image to ``im``; as written ``im`` is unbound.
    Requires network access plus the third-party ``requests`` and ``PIL``.
    """
    lowerCamelCase__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    lowerCamelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
    """Convert an original MobileViTV2 checkpoint into a HF model + image
    processor and save both to ``pytorch_dump_folder_path``.

    NOTE(review): obfuscation declared all four parameters with the same
    name (a SyntaxError) and collapsed the locals (``config``,
    ``checkpoint``, ``model``, ``state_dict``, ``rename_keys``, …); the
    helper names called here (``get_mobilevitva_config``,
    ``remove_unused_keys``, ``create_rename_keys``, ``rename_key``) are
    also not defined under those names in this obfuscated module.
    Reconcile with the upstream conversion script before running.
    """
    lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase )
    # load original state_dict
    lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval()
        lowerCamelCase__ : Tuple = False
    else:
        lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval()
        lowerCamelCase__ : Optional[Any] = False
    # remove and rename some keys of load the original model
    lowerCamelCase__ : Tuple = checkpoint
    remove_unused_keys(UpperCamelCase )
    lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # load modified state_dict
    model.load_state_dict(UpperCamelCase )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowerCamelCase__ : str = model(**UpperCamelCase )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        lowerCamelCase__ : Dict = outputs.logits
        lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
            assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # Command-line entry point for the MobileViTV2 checkpoint converter.
    # NOTE(review): obfuscation collapsed the parser/args names into ``_A``
    # while the code below still references ``parser`` and ``args``, and the
    # conversion function was renamed to ``SCREAMING_SNAKE_CASE_`` while the
    # final call still uses ``convert_mobilevitva_checkpoint`` — both are
    # NameErrors as written; reconcile with the upstream script.
    _A : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
            imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
            ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    _A : Dict =parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K); referenced by the function body below
_A = T  # backward-compatible alias for the obfuscated constant name (annotation dropped: `Optional` was never imported)


def SCREAMING_SNAKE_CASE_(donor_conc, acceptor_conc, intrinsic_conc) -> float:
    """Return the built-in potential (in volts) of a p-n junction.

    V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2)

    Args:
        donor_conc: donor concentration N_d (> intrinsic_conc).
        acceptor_conc: acceptor concentration N_a (> intrinsic_conc).
        intrinsic_conc: intrinsic carrier concentration n_i (> 0).

    Returns:
        The built-in potential in electron volts per unit charge (volts).

    Raises:
        ValueError: if any concentration is non-positive, or a doping
            concentration does not exceed the intrinsic concentration.

    Note: the original (obfuscated) signature declared three parameters with
    the same name — a SyntaxError; names restored from the body's usage.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        # k_B * T * ln(Nd*Na/ni^2), converted from joules to electron volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Self-test: execute any doctest examples embedded in this module.
    from doctest import testmod

    testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    """Config-tester mixin for MobileViTV2.

    Fix: the original bound the built config to a throwaway obfuscated name and
    then passed the undefined ``UpperCamelCase__`` to ``hasattr`` — the local is
    now named coherently so the check actually runs.
    """

    def lowerCamelCase_ ( self: Any ):
        """Build a config from the stored test inputs and assert it exposes
        the MobileViTV2-specific ``width_multiplier`` attribute."""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )
class _lowercase :
    """Test harness for MobileViTV2: builds a config plus synthetic pixel inputs
    and runs shape assertions for the backbone, the image-classification head
    and the semantic-segmentation head.

    NOTE(review): the signatures in this class were mangled by obfuscation —
    every parameter shares the name ``UpperCamelCase__`` (a SyntaxError) and
    locals are bound to ``lowerCamelCase__`` while bodies reference the intended
    names (parent, batch_size, image_size, ...). Restore the upstream names
    before executing.
    """

    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        # Store the test-configuration knobs on the instance.
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        # Hidden size scales with the width multiplier, rounded to a multiple of 8.
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout

    def lowerCamelCase_ ( self: Dict ):
        """Create random pixel values (and labels when use_labels) plus a config."""
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCamelCase_ ( self: List[Any] ):
        """Build a MobileViTV2 config from the harness attributes."""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        """Check the backbone's last hidden state shape:
        (batch, last_hidden_size, image_size/output_stride, image_size/output_stride)."""
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        """Check the logits shape of the image-classification head."""
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        """Check the segmentation head's logits shape, with and without labels."""
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowerCamelCase_ ( self: Tuple ):
        """Unpack config/inputs into the dict form used by the common tests."""
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-test suite wiring MobileViTV2 into the shared model/pipeline tests.

    NOTE(review): obfuscation artifacts — every class attribute is named ``a``
    (each assignment shadows the previous one; upstream these were distinct
    attributes such as all_model_classes / pipeline_model_mapping / test_*
    flags), and many methods share the name ``lowerCamelCase_`` so later defs
    shadow earlier ones. ``MobileViTVaModelTester``/``MobileViTVaConfigTester``
    referenced in setUp are the upstream names of the classes defined above
    (renamed here to ``_lowercase``). Restore upstream names before executing.
    """

    # Model classes exercised by the common tests (upstream: all_model_classes).
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task mapping (upstream: pipeline_model_mapping).
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags disabled for this architecture (upstream: test_* booleans).
    a = False
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: Optional[int] ):
        """Set up the model tester and config tester used by the common tests."""
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    def lowerCamelCase_ ( self: Tuple ):
        """Verify the forward signature's first argument is `pixel_values` for all models."""
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # Backbone shape check via the model tester.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        """Check the hidden-state count (5) and the successive /2 spatial down-sampling."""
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # Classification-head shape check via the model tester.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict ):
        # Segmentation-head shape check via the model tester.
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the original bound the opened image to a throwaway obfuscated name and
    then returned the undefined ``image``; the intermediate is removed so the
    function is coherent.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests: run real MobileViTV2 checkpoints on a fixture
    image and compare logits / segmentation outputs against recorded values.

    NOTE(review): locals are bound to obfuscated ``lowerCamelCase__`` names but
    referenced by their intended names (model, outputs, logits, ...); restore
    the upstream names before executing.
    """

    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        # Image processor for the classification checkpoint (None without vision deps).
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCamelCase_ ( self: Any ):
        """ImageNet classification: verify logits shape and the first three values."""
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )

    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        """Pascal VOC segmentation: verify logits shape and a 3x3x3 reference slice."""
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )

    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        """Post-processing: check segmentation map sizes with and without target_sizes."""
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        # With target_sizes the maps are resized to the requested (50, 60).
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        # Without target_sizes the maps keep the logits' spatial size (32, 32).
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# Module logger for this lightning harness.
_A : str =logging.getLogger(__name__)
# Fail fast on incompatible pytorch-lightning versions.
require_version('''pytorch_lightning>=1.0.4''')
# NOTE(review): the ``_A`` bindings below are referenced later in this file as
# MODEL_MODES, arg_to_scheduler and arg_to_scheduler_choices respectively —
# obfuscation collapsed their names, so as written the later references are
# undefined; restore the upstream names before running.
# Task mode -> transformers Auto* class used to instantiate the model.
_A : Dict ={
    '''base''': AutoModel,
    '''sequence-classification''': AutoModelForSequenceClassification,
    '''question-answering''': AutoModelForQuestionAnswering,
    '''pretraining''': AutoModelForPreTraining,
    '''token-classification''': AutoModelForTokenClassification,
    '''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeqaSeqLM,
    '''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_A : Optional[int] ={
    '''linear''': get_linear_schedule_with_warmup,
    '''cosine''': get_cosine_schedule_with_warmup,
    '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
    '''polynomial''': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Sorted scheduler names, and a "{a, b, ...}" display string for --help output.
_A : Optional[int] =sorted(arg_to_scheduler.keys())
_A : Optional[Any] ='''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowercase ( pl.LightningModule ):
    """Generic LightningModule wrapper around a transformers Auto* model:
    builds config/tokenizer/model from hparams, sets up the optimizer and LR
    schedule, and exposes dataloader hooks for subclasses.

    NOTE(review): signatures in this class were mangled by obfuscation
    (duplicate ``UpperCamelCase__`` parameters are a SyntaxError; assignment
    targets became ``lowerCamelCase__``) while bodies reference the intended
    names (config, tokenizer, model, optimizer, ...). Restore the upstream
    lightning_base.py names before executing.
    """

    def __init__( self: Optional[int] , UpperCamelCase__: argparse.Namespace , UpperCamelCase__: Dict=None , UpperCamelCase__: List[Any]="base" , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: Any=None , **UpperCamelCase__: str , ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = 0
        lowerCamelCase__ : Union[str, Any] = Path(self.hparams.output_dir )
        lowerCamelCase__ : Tuple = self.hparams.cache_dir if self.hparams.cache_dir else None
        # Build or adopt the model config.
        if config is None:
            lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase__ , **UpperCamelCase__ , )
        else:
            lowerCamelCase__ : PretrainedConfig = config
        # Propagate any dropout/layerdrop overrides from hparams into the config.
        lowerCamelCase__ : int = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
        for p in extra_model_params:
            if getattr(self.hparams , UpperCamelCase__ , UpperCamelCase__ ):
                assert hasattr(self.config , UpperCamelCase__ ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , UpperCamelCase__ , getattr(self.hparams , UpperCamelCase__ ) )
        # Build or adopt the tokenizer.
        if tokenizer is None:
            lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase__ , )
        else:
            lowerCamelCase__ : PreTrainedTokenizer = tokenizer
        # Select the Auto* class for the requested task mode, then build or adopt the model.
        lowerCamelCase__ : List[Any] = MODEL_MODES[mode]
        if model is None:
            lowerCamelCase__ : Tuple = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase__ , )
        else:
            lowerCamelCase__ : int = model

    def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Tuple , **UpperCamelCase__: Tuple ):
        """Reload the wrapped model from a pretrained checkpoint."""
        lowerCamelCase__ : int = self.model_type.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[Any] ):
        """Build the LR scheduler selected by hparams (stepped every batch)."""
        lowerCamelCase__ : int = arg_to_scheduler[self.hparams.lr_scheduler]
        lowerCamelCase__ : str = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        lowerCamelCase__ : Dict = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
        return scheduler

    def lowerCamelCase_ ( self: int ):
        """Configure the optimizer (Adafactor or AdamW) and the LR schedule."""
        lowerCamelCase__ : Any = self.model
        lowerCamelCase__ : int = ["""bias""", """LayerNorm.weight"""]
        # Two parameter groups: decayed weights vs. bias/LayerNorm (no decay).
        lowerCamelCase__ : Union[str, Any] = [
            {
                """params""": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                """weight_decay""": self.hparams.weight_decay,
            },
            {
                """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                """weight_decay""": 0.0,
            },
        ]
        if self.hparams.adafactor:
            lowerCamelCase__ : Union[str, Any] = Adafactor(
                UpperCamelCase__ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase__ , relative_step=UpperCamelCase__ )
        else:
            lowerCamelCase__ : Any = AdamW(
                UpperCamelCase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        lowerCamelCase__ : Tuple = optimizer
        lowerCamelCase__ : Optional[int] = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: List[Any] ):
        # Test step delegates to the validation step.
        return self.validation_step(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] ):
        # Test epoch end delegates to validation end.
        return self.validation_end(UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        """Total optimizer steps = dataset_size / effective batch size * epochs."""
        lowerCamelCase__ : Any = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        lowerCamelCase__ : List[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] ):
        """Record the dataset size for the given stage ('test' vs. training)."""
        if stage == "test":
            lowerCamelCase__ : Union[str, Any] = len(self.test_dataloader().dataset )
        else:
            lowerCamelCase__ : Optional[Any] = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(self.train_dataloader().dataset )

    def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: bool = False ):
        # Subclasses must provide the task-specific dataloader factory.
        raise NotImplementedError("""You must implement this for your task""" )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Training dataloader (prepared in the setup hook above).
        return self.train_loader

    def lowerCamelCase_ ( self: Any ):
        # Validation dataloader built from the "dev" split.
        return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict ):
        # Test dataloader built from the "test" split.
        return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Union[str, Any] ):
        """Path of the cached-features file for a dataset split."""
        return os.path.join(
            self.hparams.data_dir , """cached_{}_{}_{}""".format(
                UpperCamelCase__ , list(filter(UpperCamelCase__ , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Dict[str, Any] ):
        """On checkpoint save (rank zero only): also export model + tokenizer to best_tfmr."""
        lowerCamelCase__ : Optional[int] = self.output_dir.joinpath("""best_tfmr""" )
        lowerCamelCase__ : Optional[Any] = self.step_count
        self.model.save_pretrained(UpperCamelCase__ )
        self.tokenizer.save_pretrained(UpperCamelCase__ )

    @staticmethod
    def lowerCamelCase_ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] ):
        """Add model/optimizer CLI arguments shared by all tasks to the parser."""
        parser.add_argument(
            """--model_name_or_path""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
        parser.add_argument(
            """--config_name""" , default="""""" , type=UpperCamelCase__ , help="""Pretrained config name or path if not the same as model_name""" )
        parser.add_argument(
            """--tokenizer_name""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
        parser.add_argument(
            """--cache_dir""" , default=str(Path(UpperCamelCase__ ).parent / """test_run""" / """cache""" ) , type=UpperCamelCase__ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
        parser.add_argument(
            """--encoder_layerdrop""" , type=UpperCamelCase__ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--decoder_layerdrop""" , type=UpperCamelCase__ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--dropout""" , type=UpperCamelCase__ , help="""Dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--attention_dropout""" , type=UpperCamelCase__ , help="""Attention dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument("""--learning_rate""" , default=5e-5 , type=UpperCamelCase__ , help="""The initial learning rate for Adam.""" )
        parser.add_argument(
            """--lr_scheduler""" , default="""linear""" , choices=UpperCamelCase__ , metavar=UpperCamelCase__ , type=UpperCamelCase__ , help="""Learning rate scheduler""" , )
        parser.add_argument("""--weight_decay""" , default=0.0 , type=UpperCamelCase__ , help="""Weight decay if we apply some.""" )
        parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=UpperCamelCase__ , help="""Epsilon for Adam optimizer.""" )
        parser.add_argument("""--warmup_steps""" , default=0 , type=UpperCamelCase__ , help="""Linear warmup over warmup_steps.""" )
        parser.add_argument("""--num_workers""" , default=4 , type=UpperCamelCase__ , help="""kwarg passed to DataLoader""" )
        parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=UpperCamelCase__ )
        parser.add_argument("""--train_batch_size""" , default=32 , type=UpperCamelCase__ )
        parser.add_argument("""--eval_batch_size""" , default=32 , type=UpperCamelCase__ )
        parser.add_argument("""--adafactor""" , action="""store_true""" )
class _lowercase ( pl.Callback ):
    """Callback that initialises the RAG retriever exactly once, on the master worker.

    Fix: the original (obfuscated) hook declared two parameters with the same
    name (a SyntaxError) while the body referenced ``trainer``/``pl_module``;
    the parameter names are restored from the body's usage.
    """

    def lowerCamelCase_ ( self , trainer , pl_module ):
        """Init retrieval on global rank zero only."""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _lowercase ( pl.Callback ):
    """Debug callback: print the name of every RAG parameter that has no gradient.

    Fix: the original (obfuscated) hook declared two parameters with the same
    name (a SyntaxError) and printed the undefined ``UpperCamelCase__``; the
    parameter names and the printed loop variable (``name``) are restored from
    the body's own loop over ``named_parameters()``.
    """

    def lowerCamelCase_ ( self , trainer , pl_module ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class _lowercase ( pl.Callback ):
    """Logging callback: mirrors learning rates and validation/test metrics.

    Fixes: the original (obfuscated) hooks declared duplicate parameter names
    (a SyntaxError), bound ``trainer.callback_metrics`` to a throwaway name
    while reading ``metrics[key]``, and formatted the undefined
    ``UpperCamelCase__`` instead of ``key``; names restored from usage.

    NOTE(review): obfuscation also collapsed what were three distinct hooks
    upstream (lr logging / on_validation_end / on_test_end) into three methods
    that share the name ``lowerCamelCase_`` — in Python the later definitions
    shadow the earlier ones. The method names are preserved to avoid changing
    this file's interface; restore the real hook names before real use.
    """

    def lowerCamelCase_ ( self , trainer , pl_module ):
        """Log the current learning rate of every optimizer param group."""
        lr_scheduler = trainer.lr_schedulers[0]["""scheduler"""]
        lrs = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def lowerCamelCase_ ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        """Report validation metrics on the rank-zero process."""
        rank_zero_info("""***** Validation results *****""" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )

    def lowerCamelCase_ ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        """Report test metrics and persist them to <output_dir>/test_results.txt."""
        rank_zero_info("""***** Test results *****""" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
        with open(output_test_results_file , """w""" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(key , str(metrics[key] ) ) )
def SCREAMING_SNAKE_CASE_(parser, root_dir) -> None:
    """Add generic training CLI options shared by the lightning examples.

    Args:
        parser: ``argparse.ArgumentParser`` to extend (mutated in place).
        root_dir: path whose parent anchors the default output/data directories.

    Note: the original (obfuscated) signature declared two parameters with the
    same name — a SyntaxError; ``parser``/``root_dir`` restored from the body's
    usage. The ``type=`` callables that obfuscation erased are restored as
    int/float/str per the argument semantics — confirm against upstream.
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def SCREAMING_SNAKE_CASE_(
    model,
    args,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
) -> Any:
    """Build a ``pl.Trainer`` for ``model`` from ``args`` and optionally run fit().

    Fixes over the obfuscated original: duplicate parameter names (a
    SyntaxError) restored from the body's usage; the mutable default argument
    ``extra_callbacks=[]`` replaced by ``None`` + in-body normalisation;
    ``args.fpaa`` corrected to ``args.fp16`` (the flag added by the generic-args
    helper in this file).

    NOTE(review): the ``train_params`` keys and ``weights_summary=None`` were
    erased by obfuscation and are reconstructed from the upstream lightning
    harness — confirm before relying on them. ``LoggingCallback``/``InitCallback``
    are the upstream names of the callback classes defined above (renamed here
    to ``_lowercase``).
    """
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # Mutable-default fix: normalise here instead of sharing one list across calls.
    if extra_callbacks is None:
        extra_callbacks = []
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None  # NOTE(review): key reconstructed — confirm
    train_params["devices"] = "auto"  # NOTE(review): key reconstructed — confirm
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("RAG modeling tests with new set functions successfuly executed!" )
    return trainer
| 631 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)  # module logger
# NOTE(review): the ``_A`` bindings below are referenced inside the tokenizer
# class as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — obfuscation collapsed the names, so
# as written those references are undefined; restore them before running.
# Serialized tokenizer file names.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub URLs of the pretrained fast-tokenizer files.
_A : Tuple ={
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
# Maximum model input length per checkpoint.
_A : List[Any] ={
    '''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (tokenizers-backed) GPT-NeoX-20B tokenizer.

    Fixes over the obfuscated original: the ``__init__`` declared duplicate
    parameter names (a SyntaxError) and the bodies referenced names the
    signatures never bound; parameter and local names are restored from the
    bodies' own usage.

    NOTE(review): obfuscation artifacts preserved deliberately — the class
    attributes are all named ``a`` (each assignment shadows the previous one)
    and both methods share the name ``lowerCamelCase_`` (the later definition
    shadows the earlier). Names kept to avoid changing this file's interface;
    restore the upstream attribute/method names before real use.
    """

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Build the fast tokenizer and force the backend pre-tokenizer to honour
        ``add_prefix_space`` even when the serialized state disagrees."""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            # Rebuild the pre-tokenizer of the serialized type with the requested setting.
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def lowerCamelCase_ ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def lowerCamelCase_ ( self , conversation: "Conversation" ):
        """Flatten a conversation into input ids (each text followed by EOS),
        left-truncated to ``model_max_length``."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : Dict ={
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase(PretrainedConfig):
    """Configuration for the TrOCR decoder.

    Fixes over the original block: the class inherited from itself and its
    ``__init__`` declared duplicate ``UpperCamelCase__`` parameter names (a
    SyntaxError). The base class and parameter names below are restored from
    the attribute assignments in the body, which the defaults match exactly.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Common config attribute names mapped onto TrOCR-specific ones.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Decoder architecture.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        # Regularization.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        # Behavior flags.
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it provides.
# Fixes over the original block: `_import_structure` was referenced at the
# bottom but never defined (the dict was bound to `_A` and then clobbered by
# the model list), and the _LazyModule proxy was never installed in
# sys.modules, so lazy loading could not work.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

# The modeling module is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
import baseaa
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> bytes:
    """Encode *UpperCamelCase* (a str) as Ascii85 bytes.

    Fix: the original body called ``string.encode(...)`` where ``string`` is
    an undefined name — it must encode the function's own parameter.
    """
    return baseaa.aaaencode(UpperCamelCase.encode("""utf-8"""))
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> str:
    """Decode Ascii85-encoded *UpperCamelCase* (bytes) back into a UTF-8 string."""
    decoded_bytes = baseaa.aaadecode(UpperCamelCase)
    return decoded_bytes.decode("""utf-8""")
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 631 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Optional[int] ='''tf'''
else:
_A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Test-suite for the sentencepiece-backed MarianTokenizer.

    NOTE(review): this block was mechanically obfuscated. The class bases and
    the repeated ``a = ...`` attributes shadow each other (the intended mixin
    is presumably TokenizerTesterMixin, imported above — confirm), and many
    methods read names (``tokenizer``, ``batch``, ``tok``, ``batch_smaller``,
    ``en_de_tokenizer``, ``save_dir``, ...) that are only ever assigned to
    throwaway ``lowerCamelCase__`` locals. Restore the real bindings before
    relying on this suite.
    """
    # Only the last of these bindings survives (all three are named `a`).
    a = MarianTokenizer
    a = False
    a = True
    def lowerCamelCase_ ( self: List[str] ):
        # setUp: build a tiny vocab + sentencepiece fixture tokenizer on disk.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # get_tokenizer: reload the fixture tokenizer with optional overrides.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # get_input_output_texts: identical input/output for this fixture.
        return (
            "This is a test",
            "This is a test",
        )
    def lowerCamelCase_ ( self: Optional[Any] ):
        # convert token <-> id round-trip: "</s>" maps to id 0.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab layout: "</s>" first, "<unk>" second, "<pad>" last, size 9.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )
    def lowerCamelCase_ ( self: int ):
        # vocab_size matches the 9-token fixture vocab.
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def lowerCamelCase_ ( self: int ):
        # Round-trip a real en-de checkpoint through save_pretrained.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        # Truncation: over-long input is clipped to the 512 max length.
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Padding: short inputs are padded only to the batch maximum (10).
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Integration test against a pinned revision of opus-mt-en-de.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def lowerCamelCase_ ( self: List[str] ):
        # Separate source/target vocabs: text vs. text_target encode differently.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a SwinConfig from a timm checkpoint name such as
    ``swin_tiny_patch4_window7_224`` (optionally containing ``in22k``).

    Fixes over the original block: every local was bound to a throwaway
    ``lowerCamelCase__`` name while later lines read ``config``/``embed_dim``/
    etc. (NameErrors), and the window size was parsed from only the last
    character of ``windowN`` — mis-reading two-digit windows like ``window12``
    as 2. The function is also renamed to match its call site below.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    # Strip the "window" prefix so multi-digit sizes parse correctly.
    window_size = int(name_split[3].replace("window", ""))

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:  # "large"
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    # ImageNet-22k checkpoints have 21841 classes, ImageNet-1k has 1000.
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Translate one timm Swin state-dict key into the HF Swin naming scheme.

    Fixes over the original block: every ``str.replace`` result was discarded
    into a throwaway local, so the function returned its input unchanged.
    Replacement order matters: "attn.proj" must be rewritten before the bare
    "attn" substitution, and the exact "norm.weight"/"norm.bias" checks run
    after the norm1/norm2 rewrites.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # Classification head keys lose the "swin." backbone prefix.
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[str]:
    # Rewrites a timm Swin state dict for the HF model: fused qkv matrices are
    # split into query/key/value slices and attention-mask buffers are dropped.
    # NOTE(review): the local bindings here were mangled by obfuscation — the
    # names `orig_state_dict`, `val`, `key_split`, `layer_num`, `block_num`,
    # `dim` and `model` are read below but only ever assigned to throwaway
    # `lowerCamelCase__` locals, and the split q/k/v slices (and the
    # rename_key result for ordinary keys) are never written back into the
    # dict. Restore the real assignments before using this.
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Optional[int] = orig_state_dict.pop(UpperCamelCase )
        if "mask" in key:
            # Relative-position masks are recomputed by the HF model.
            continue
        elif "qkv" in key:
            # Key layout: layers.<L>.blocks.<B>.attn.qkv.{weight,bias}
            lowerCamelCase__ : Optional[int] = key.split(""".""" )
            lowerCamelCase__ : List[Any] = int(key_split[1] )
            lowerCamelCase__ : Optional[int] = int(key_split[3] )
            # Per-block head size, used to slice the fused qkv tensor in thirds.
            lowerCamelCase__ : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # Rows [0:dim] = query, [dim:2*dim] = key, [-dim:] = value.
                lowerCamelCase__ : Union[str, Any] = val[:dim, :]
                lowerCamelCase__ : Any = val[
                    dim : dim * 2, :
                ]
                lowerCamelCase__ : List[str] = val[-dim:, :]
            else:
                # Same three-way split for the fused bias vector.
                lowerCamelCase__ : str = val[
                    :dim
                ]
                lowerCamelCase__ : Any = val[
                    dim : dim * 2
                ]
                lowerCamelCase__ : Any = val[
                    -dim:
                ]
        else:
            lowerCamelCase__ : Optional[Any] = val
    return orig_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
    # End-to-end conversion: load the timm checkpoint, copy its weights into a
    # fresh SwinForImageClassification, sanity-check both models on a COCO
    # sample image, then save the model and processor to the dump folder.
    # NOTE(review): obfuscation mangled the locals — `timm_model`, `model`,
    # `swin_name`, `image_processor`, `inputs`, `pytorch_dump_folder_path`,
    # etc. are read below but only assigned to throwaway `lowerCamelCase__`
    # names, and `get_swin_config`/`convert_state_dict` are referenced while
    # every function in this module was renamed to SCREAMING_SNAKE_CASE_.
    lowerCamelCase__ : Optional[int] = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase )
    timm_model.eval()
    lowerCamelCase__ : Optional[Any] = get_swin_config(UpperCamelCase )
    lowerCamelCase__ : Dict = SwinForImageClassification(UpperCamelCase )
    model.eval()
    lowerCamelCase__ : Tuple = convert_state_dict(timm_model.state_dict() , UpperCamelCase )
    model.load_state_dict(UpperCamelCase )
    lowerCamelCase__ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
    lowerCamelCase__ : List[str] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
    lowerCamelCase__ : List[str] = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
    lowerCamelCase__ : Optional[int] = timm_model(inputs["""pixel_values"""] )
    lowerCamelCase__ : Any = model(**UpperCamelCase ).logits
    # Converted logits must match the timm reference within 1e-3.
    assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
    print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse the timm model name and output directory, then
    # run the conversion.
    # NOTE(review): `parser`, `args` and `convert_swin_checkpoint` are read
    # below but the parser/args objects are only ever bound to `_A`, and the
    # conversion function above was renamed to SCREAMING_SNAKE_CASE_ —
    # restore the real bindings for this script to run.
    _A : str =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swin_name''',
        default='''swin_tiny_patch4_window7_224''',
        type=str,
        help='''Name of the Swin timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    _A : List[str] =parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for RWKV models.

    Fixes over the original block: the class inherited from itself and its
    ``__init__`` declared duplicate ``UpperCamelCase__`` parameter names (a
    SyntaxError). Parameter names are restored from the attribute assignments
    in the body, which the defaults match exactly.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return every prime strictly below ``limit`` via an odd-only
    Sieve of Eratosthenes.

    Fixes over the original block: sieve updates and collected primes were
    written to throwaway locals (so the sieve never marked anything and the
    result appended ``limit`` instead of each prime), the function names did
    not match their call sites, and limits below 3 crashed on indexing.
    """
    if limit < 3:
        # No primes strictly below 2 (or below anything smaller).
        return []
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    # Mark odd composites; even numbers are skipped when collecting below.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index += i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    """Project Euler 50: the prime below ``ceiling`` that can be written as
    the sum of the most consecutive primes.

    For each start index, only sums at least as long as the best run found so
    far are tried, and any window whose sum reaches ``ceiling`` ends the scan.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of an O(n) list scan
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)
_A : int ={
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for RoCBert, which augments BERT with pronunciation and
    shape embeddings for robust Chinese text encoding.

    Fixes over the original block: the class inherited from itself and its
    ``__init__`` declared duplicate ``UpperCamelCase__`` parameter names (a
    SyntaxError). Parameter names are restored from the attribute assignments
    in the body, which the defaults match exactly.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # Standard BERT-style encoder hyperparameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific pronunciation/shape embedding channels.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A : Tuple =logging.getLogger(__name__)
class _lowercase(TokenClassificationTask):
    """CoNLL-style NER task: one token per line, the label in column
    ``label_idx`` (last column by default), blank lines between sentences.

    Fixes over the original block: all three methods shared the name
    ``lowerCamelCase_`` (each shadowing the previous one), and the locals
    ``guid_index``/``examples``/``words``/``labels``/``example_id`` were read
    but only ever assigned to throwaway names. Method names are restored to
    the TokenClassificationTask interface imported above.
    """

    def __init__(self, label_idx=-1):
        # In NER datasets, the last column is usually reserved for the NER label.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]):
        """Parse ``{data_dir}/{mode}.txt`` into a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens.
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Echo the test file with one prediction appended per token line."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                # An exhausted prediction list means the sentence is complete.
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str):
        """Labels from *path* (one per line, 'O' prepended if missing) or the
        CoNLL-2003 NER defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowercase(_lowercase):
    """Chunking task: like the NER task above, but the label lives in the
    second-to-last column and the default tag set is CoNLL-2000 chunks."""

    def __init__(self):
        # In CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str):
        """Labels from *path* (one per line, 'O' prepended if missing) or the
        default chunk tag set.

        Fix over the original block: the method name (mangled to
        ``lowerCamelCase_``) is restored, and the labels read from *path* were
        only bound to a throwaway local before being checked and returned.
        """
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class _lowercase(_lowercase):
    """POS-tagging task over Universal Dependencies CoNLL-U files, read with
    the ``conllu`` incremental parser.

    Fixes over the original block: all three methods shared the mangled name
    ``lowerCamelCase_``, and locals such as ``mode``/``examples``/``words``/
    ``labels``/``example_id``/``s_p``/``out`` were read but only ever assigned
    to throwaway names. Method names are restored to the
    TokenClassificationTask interface.
    """

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]):
        """Parse ``{data_dir}/{mode}.txt`` (CoNLL-U) into InputExamples using
        the ``form`` token as word and ``upos`` as label."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write each sentence as ``form (gold|pred)`` pairs on one line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str):
        """Labels from *path* (one per line) or the default UPOS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 631 |
'''simple docstring'''
import sys
import turtle
def get_mid(point_a, point_b) -> tuple[float, float]:
    """Midpoint of two 2-D points.

    Fix: the original averaged the first point's coordinates with themselves
    (``pa[0] + pa[0]``), so it returned ``point_a`` unchanged.
    """
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(vertex_1, vertex_2, vertex_3, depth) -> None:
    """Draw one triangle outline with the global turtle pen, then recurse into
    the three corner sub-triangles until ``depth`` reaches 0.

    Fixes over the original block: the signature declared duplicate
    ``UpperCamelCase`` parameter names (a SyntaxError) while the body read
    undefined ``vertexa``/``depth`` names, and both functions carried the
    same mangled name even though they call each other as
    ``triangle``/``get_mid``.
    """
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])

    if depth == 0:
        return

    # Recurse on the three half-scale triangles anchored at each vertex.
    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_3, vertex_1), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Fix: the pen and vertices were bound to `_A` while the calls below read
    # `my_pen` and `vertices`.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image into a ``[-1, 1]``-ranged NCHW float tensor, with
    each side rounded down to a multiple of 32 (required by the VQ-VAE/U-Net
    downsampling strides).

    Fixes over the original block: the ``w``/``h`` sizes and every
    intermediate image stage were bound to throwaway locals while later lines
    read ``image``/``w``/``h``, and the function carried a mangled name even
    though the pipeline below calls it as ``preprocess``.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _lowercase(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    The base class was mangled to a self-reference (`_lowercase`); the imports at
    the top of the file show it is `DiffusionPipeline`. The original signatures
    repeated the parameter name `UpperCamelCase__`, which is a SyntaxError; real
    parameter names are restored below.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run super-resolution on `image` and return the upscaled result.

        Returns an `ImagePipelineOutput` (or a 1-tuple when `return_dict=False`).
        Raises ValueError if `image` is neither a PIL image nor a tensor.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5  # [-1, 1] -> [0, 1]
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 631 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBert configs/inputs for the model tests.

    Renamed from the obfuscated `_lowercase`: the test class below instantiates
    `TFConvBertModelTester`, and calls the real method names
    (`prepare_config_and_inputs`, `create_and_check_*`), which are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the body deliberately hard-codes values (e.g. hidden_size=384)
        # instead of using the constructor arguments, matching the original.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # exercise both the dict and the list calling conventions
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # replicate every input once per choice: (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvBert TF model tests.

    Fixes over the mangled original: the mixin bases were self-references;
    every class attribute was named `a` (each overwriting the previous one,
    while `TFModelTesterMixin` reads `all_model_classes` etc.); and the test
    methods were not named `test_*`, so unittest never ran them.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    # ConvBert splits heads between self-attention and conv, hence / 2
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published YituTech/conv-bert-base weights.

    Renamed from `_lowercase` (three classes shared that name, so only the last
    one was reachable as a module attribute) and the method restored to `test_*`
    so unittest discovers it.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Golden values recorded from the reference checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 631 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase(ProcessorMixin):
    """Processor bundling a FLAVA image processor and a BERT tokenizer.

    Fixes over the mangled original: the base class was a self-reference
    (`ProcessorMixin` is the import at the top of the file); the three class
    attributes were all named `a` (overwriting each other), while
    `ProcessorMixin` requires `attributes`, `image_processor_class` and
    `tokenizer_class`; and the signatures repeated one parameter name, which
    is a SyntaxError.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # accept the deprecated kwarg as a fallback for the new one
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize `text` and/or preprocess `images`; at least one must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # merged, order-preserving, de-duplicated union of both components' names
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 631 |
'''simple docstring'''
# NOTE(review): this looks like the package version string (the value matches
# accelerate 0.21.0); `_A` is presumably a mangled `__version__` — confirm
# before renaming, since other modules may import `_A`.
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViT-MSN configs/inputs for the model tests.

    Renamed from the obfuscated `_lowercase` (the test class below instantiates
    `ViTMSNModelTester`); duplicate parameter names and throwaway local
    assignments are restored to the names the code actually uses.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # bug fix: these were plain strings missing the f-prefix, so the
        # placeholders were printed literally
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ViT-MSN model tests.

    Fixes over the mangled original: mixin bases were self-references; the
    class attributes were all named `a` (each overwriting the previous one,
    while `ModelTesterMixin` reads `all_model_classes` etc.); and the test
    methods are restored to `test_*` names so unittest discovers them.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def SCREAMING_SNAKE_CASE_():
    """Load and return the shared COCO test image fixture.

    Fixes obfuscation damage: the opened image was bound to a throwaway
    local while the function returned the undefined name ``image``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run ViT-MSN on a fixture image and compare logits.

    NOTE(review): obfuscation damage — several locals below are bound to
    ``lowerCamelCase__`` and then read via the undefined ``UpperCamelCase__``;
    the ``.to(UpperCamelCase__)`` calls presumably targeted ``torch_device``
    from transformers.testing_utils. Confirm before relying on this test.
    """
    @cached_property
    def lowerCamelCase_ ( self: Dict ):
        # Presumably the ``default_image_processor`` property before renaming.
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self: str ):
        # Seed so the (deterministic) forward pass is reproducible.
        torch.manual_seed(2 )
        # NOTE(review): local presumably ``model``.
        lowerCamelCase__ : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = self.default_image_processor
        lowerCamelCase__ : Any = prepare_img()
        # NOTE(review): local presumably ``inputs``.
        lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this config module.
logger = logging.get_logger(__name__)

# Fixes obfuscation damage: both the logger and this map were assigned to the
# same name ``_A``, so the logger was clobbered and the intended names were
# never defined.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/trocr-base-handwritten""": (
        """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """Configuration for the TrOCR decoder (stores decoder hyper-parameters).

    Fixes obfuscation damage: the three class attributes were all named ``a``
    (clobbering each other) and every ``__init__`` parameter shared the name
    ``UpperCamelCase__`` — a SyntaxError. Names restored from the attribute
    assignments in the body.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50_265,              # size of the decoder vocabulary
        d_model=1_024,                  # decoder hidden size
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 1 |
'''simple docstring'''
import sys
import turtle
def SCREAMING_SNAKE_CASE_(pa, pb) -> tuple[float, float]:
    """Return the midpoint of points ``pa`` and ``pb``.

    Fixes obfuscation damage: both parameters were named ``UpperCamelCase``
    (a SyntaxError) and the body averaged one point with itself.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def SCREAMING_SNAKE_CASE_(vertexa, vertexb, vertexc, depth) -> None:
    """Recursively draw a Sierpinski triangle with the module-level turtle pen.

    Fixes obfuscation damage: all four parameters shared one name (a
    SyntaxError) and every ``goto`` targeted the same vertex. Midpoints are
    computed by a local helper because the sibling midpoint function's name
    was also mangled by the obfuscation.
    """
    def _mid(p, q):
        # Midpoint of two (x, y) points.
        return (p[0] + q[0]) / 2, (p[1] + q[1]) / 2

    # Trace the outline of the current triangle.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    SCREAMING_SNAKE_CASE_(vertexa, _mid(vertexa, vertexb), _mid(vertexa, vertexc), depth - 1)
    SCREAMING_SNAKE_CASE_(vertexb, _mid(vertexb, vertexa), _mid(vertexb, vertexc), depth - 1)
    SCREAMING_SNAKE_CASE_(vertexc, _mid(vertexc, vertexa), _mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Require exactly one CLI argument: the recursion depth.
    if len(sys.argv) != 2:
        raise ValueError(
            """Correct format for using this script: """
            """python fractals.py <int:depth_for_fractal>"""
        )
    # Fixes obfuscation damage: the pen and vertex list were both assigned to
    # ``_A`` while the code below read the undefined names ``my_pen``/``vertices``.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    # The drawing function directly above is (after obfuscation) the last
    # binding of SCREAMING_SNAKE_CASE_ at this point in the module.
    SCREAMING_SNAKE_CASE_(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_(graph) -> bool:
    """Return True if ``graph`` (adjacency-list dict/list) is bipartite.

    Fixes obfuscation damage: the nested ``dfs`` declared two parameters with
    the same name (a SyntaxError) and wrote its results into throwaway locals
    instead of the ``visited``/``color`` arrays, so the colouring never
    happened.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Colour vertex v with c, then alternate colours down each edge.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Colour every connected component.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge joins two same-coloured vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# Fixes obfuscation damage: the graph was assigned to ``_A`` and the check was
# invoked via the undefined name ``check_bipartite_dfs``; at this point in the
# module the checker is the latest binding of SCREAMING_SNAKE_CASE_.
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(SCREAMING_SNAKE_CASE_(graph))
| 631 | 1 |
'''simple docstring'''
from math import factorial
def SCREAMING_SNAKE_CASE_(successes, trials, prob) -> float:
    """Return the binomial probability of ``successes`` in ``trials`` draws.

    Fixes obfuscation damage: all three parameters shared the name
    ``UpperCamelCase`` (a SyntaxError) while the body already read
    ``successes``/``trials``/``prob``; the isinstance checks compared a value
    against itself.

    Raises:
        ValueError: if successes > trials, either count is negative or not an
            int, or prob is outside (0, 1).
    """
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("""Probability of 2 successes out of 4 trails""")
    print("""with probability of 0.75 is:""", end=""" """)
    # Fixes obfuscation damage: the call used the undefined name
    # ``binomial_distribution``; the function above is the current binding of
    # SCREAMING_SNAKE_CASE_ when this guard executes.
    print(SCREAMING_SNAKE_CASE_(2, 4, 0.75))
| 631 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Map-style dataset that lazily applies ``process`` to each element.

    Fixes obfuscation damage: the three ``__init__`` parameters all shared one
    name (a SyntaxError) and every attribute assignment went into a throwaway
    local instead of ``self``, so ``__len__``/``__getitem__`` crashed.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process      # callable applied per item
        self.params = params        # extra kwargs forwarded to `process`

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that runs ``infer`` over a loader and can unroll batches into
    batch_size=1 items (mirrors transformers' pipeline PipelineIterator).

    NOTE(review): obfuscation damage throughout — assignments intended for
    ``self.loader``/``self.infer``/etc. and for locals such as ``result`` and
    ``processed`` were collapsed into ``lowerCamelCase__`` and are read back
    via the undefined ``UpperCamelCase__``. Code left byte-identical; confirm
    against transformers' pipelines/pt_utils.py before fixing.
    """
    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
        # NOTE(review): presumably self.loader / self.infer / self.params.
        lowerCamelCase__ : int = loader
        lowerCamelCase__ : str = infer
        lowerCamelCase__ : Optional[int] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : int = loader_batch_size
        # Internal bookkeeping
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
    def __len__( self: Dict ):
        return len(self.loader )
    def __iter__( self: Optional[int] ):
        # NOTE(review): presumably self.iterator = iter(self.loader).
        lowerCamelCase__ : List[Any] = iter(self.loader )
        return self
    def lowerCamelCase_ ( self: Any ):
        """Slice one batch_size=1 item out of the buffered loader batch."""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase__ : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase__ : str = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase__ : List[str] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase__ : str = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
        self._loader_batch_index += 1
        return result
    def lowerCamelCase_ ( self: List[Any] ):
        """Presumably ``__next__`` before renaming: advance, inferring a new
        batch when the buffered one is exhausted."""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        lowerCamelCase__ : Optional[Any] = next(self.iterator )
        lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                lowerCamelCase__ : Optional[Any] = processed
            else:
                lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
                lowerCamelCase__ : Any = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : Any = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : List[str] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase__ : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase__ : List[Any] = processed
            lowerCamelCase__ : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens the sub-iterators produced by a chunking
    pipeline ``infer`` into one stream of items.

    Fixes obfuscation damage: the ``__init__`` parameters all shared one name
    (a SyntaxError), and ``__iter__`` discarded the iterator/subiterator into
    throwaway locals instead of storing them on ``self``, so the advance
    method crashed on missing attributes.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def lowerCamelCase_(self):
        # NOTE(review): presumably ``__next__`` before renaming.
        if self.subiterator is None:
            # "Subiterator None" means we haven't started a preprocess iterator yet.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk items back into lists at
    ``is_last`` boundaries (mirrors transformers' PipelinePackIterator).

    NOTE(review): obfuscation damage — locals such as ``item``, ``is_last``
    and ``processed`` were collapsed into ``lowerCamelCase__`` and read back
    via the undefined ``UpperCamelCase__``. Code left byte-identical.
    """
    def __iter__( self: List[Any] ):
        # NOTE(review): presumably self.iterator = iter(self.loader).
        lowerCamelCase__ : int = iter(self.loader )
        return self
    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase__ : List[str] = False
        lowerCamelCase__ : Union[str, Any] = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase__ : Any = self.loader_batch_item()
                lowerCamelCase__ : Tuple = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
                if is_last:
                    return accumulator
        while not is_last:
            lowerCamelCase__ : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    lowerCamelCase__ : Dict = processed
                else:
                    lowerCamelCase__ : Dict = list(processed.keys() )[0]
                    lowerCamelCase__ : Dict = processed[key]
                    if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                        lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
                    else:
                        lowerCamelCase__ : Dict = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase__ : str = observed_batch_size
                lowerCamelCase__ : str = processed
                lowerCamelCase__ : Optional[int] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase__ : List[Any] = self.loader_batch_item()
                    lowerCamelCase__ : str = item.pop("""is_last""" )
                    accumulator.append(UpperCamelCase__ )
                    if is_last:
                        return accumulator
            else:
                lowerCamelCase__ : Optional[Any] = processed
                lowerCamelCase__ : Optional[int] = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset view that projects one key out of each underlying element.

    Fixes obfuscation damage: ``__init__`` parameters shared one name (a
    SyntaxError) and the attribute assignments went into throwaway locals.
    """

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset view that pairs two keys of each element as text / text_pair.

    Fixes obfuscation damage: ``__init__`` parameters shared one name (a
    SyntaxError) and both key attributes collapsed onto ``keya``, so the
    second member of the pair was lost.
    """

    def __init__(self, dataset, keya, keyb):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tokenizer tests for XLM using a tiny on-disk BPE vocab/merges fixture.

    Fixes obfuscation damage: both class attributes were named ``a`` (the
    mixin reads ``tokenizer_class``/``test_rust_tokenizer``); setUp discarded
    the fixture paths into locals while later code read ``self.vocab_file`` /
    ``self.merges_file``; method names restored to the unittest/mixin hooks
    they implement.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a minimal vocab.json and merges.txt into the tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, """w""") as fp:
            fp.write("""\n""".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Round-trip fixture required by the tokenizer test mixin."""
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenisation and id conversion against the fixture vocab."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token framing for single and paired sequences."""
        tokenizer = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 631 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): obfuscation damage — this constant's original purpose is not
# recoverable from here (presumably an os.environ assignment such as
# TF_CPP_MIN_LOG_LEVEL given the env-dump context); confirm before removing.
_A : Dict ='''3'''

# Dump interpreter and OS details.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

# Torch/CUDA details are best-effort: torch may not be installed.
try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)

# transformers version is likewise optional.
try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_(graph) -> bool:
    """Return True if ``graph`` (adjacency-list dict/list) is bipartite.

    Fixes obfuscation damage: the nested ``dfs`` declared two parameters with
    the same name (a SyntaxError) and wrote its results into throwaway locals
    instead of the ``visited``/``color`` arrays.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Colour vertex v with c, then alternate colours down each edge.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Colour every connected component.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge joins two same-coloured vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# Fixes obfuscation damage: the graph was assigned to ``_A`` and the check was
# invoked via the undefined name ``check_bipartite_dfs``; at this point in the
# module the checker is the latest binding of SCREAMING_SNAKE_CASE_.
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(SCREAMING_SNAKE_CASE_(graph))
| 631 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR subpackage.
# Fixes obfuscation damage: the structure dict and the modeling list were both
# assigned to ``_A`` while later code read the undefined ``_import_structure``,
# and the resulting lazy module was bound to ``_A`` instead of being installed
# in sys.modules.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling objects are only exported when torch is installed.
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
# Fixes obfuscation damage: the worker below declares ``global process_lock``
# but the lock was bound to ``_A``, leaving process_lock undefined.
process_lock = Lock()
def SCREAMING_SNAKE_CASE_(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for odd-even transposition sort: exchange ``value`` with the
    left/right neighbour over pipes for 10 rounds, then report the result.

    Fixes obfuscation damage: all seven parameters shared the name
    ``UpperCamelCase`` (a SyntaxError) and the exchanged value was discarded
    into throwaway locals.

    Args:
        position: index of this worker in the line of processes.
        value: this worker's current element.
        l_send/r_send: pipes used to send to the left/right neighbour
            (``None`` at the ends of the line).
        lr_cv/rr_cv: pipes used to receive from the left/right neighbour.
        result_pipe: pipe on which the final value is reported.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
    """Spawn one process per element and run parallel odd-even transposition
    sort, collecting the sorted values through result pipes.

    NOTE(review): obfuscation damage — ``process_array_``/``result_pipe`` and
    the ``temp_*`` pipe handles were assigned to ``lowerCamelCase__`` locals
    and the Process target references the undefined ``UpperCamelCase`` (the
    worker function above, whose name was also mangled). Left byte-identical
    because a per-block fix cannot resolve the cross-function name shadowing.
    """
    lowerCamelCase__ : Tuple = []
    lowerCamelCase__ : Optional[Any] = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    lowerCamelCase__ : int = Pipe()
    lowerCamelCase__ : Any = Pipe()
    process_array_.append(
        Process(
            target=UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    lowerCamelCase__ : Any = temp_rs
    lowerCamelCase__ : Optional[Any] = temp_rr
    for i in range(1 , len(UpperCamelCase ) - 1 ):
        lowerCamelCase__ : Dict = Pipe()
        lowerCamelCase__ : str = Pipe()
        process_array_.append(
            Process(
                target=UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        lowerCamelCase__ : int = temp_rs
        lowerCamelCase__ : int = temp_rr
    process_array_.append(
        Process(
            target=UpperCamelCase , args=(
                len(UpperCamelCase ) - 1,
                arr[len(UpperCamelCase ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(UpperCamelCase ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(UpperCamelCase ) ):
        lowerCamelCase__ : Optional[int] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def SCREAMING_SNAKE_CASE_ () -> Any:
    """Demo entry point: sort a reversed list of 10 ints and print both lists.

    NOTE(review): obfuscation damage — the call targets the undefined names
    ``odd_even_transposition`` and ``main`` (both functions in this file were
    renamed to SCREAMING_SNAKE_CASE_, so this definition shadows the sorter it
    needs to call). Left byte-identical; requires restoring distinct function
    names to fix.
    """
    lowerCamelCase__ : List[Any] = list(range(10 , 0 , -1 ) )
    print("""Initial List""" )
    print(*UpperCamelCase )
    lowerCamelCase__ : int = odd_even_transposition(UpperCamelCase )
    print("""Sorted List\n""" )
    print(*UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined here — presumably this function.
    main()
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this config module.
logger = logging.get_logger(__name__)

# Fixes obfuscation damage: both the logger and this map were assigned to the
# same name ``_A``, clobbering the logger and leaving the intended map name
# undefined.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase ( _lowercase ):
    """Configuration for the Audio Spectrogram Transformer (AST).

    Fixes obfuscation damage: every ``__init__`` parameter shared the name
    ``UpperCamelCase__`` — a SyntaxError — while the body already read the
    intended hyper-parameter names; names/defaults restored from the
    attribute assignments.
    """

    model_type = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch/stride settings control how the spectrogram is tokenised.
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``.

    Fixes obfuscation damage: the parameter was renamed while the body read
    the original name ``input_num``, and the type check compared the value
    against itself (``isinstance(x, x)``).

    >>> SCREAMING_SNAKE_CASE_(6)
    6
    >>> SCREAMING_SNAKE_CASE_(12)
    16

    Raises:
        ValueError: if the input is not an integer or is not positive.
    """
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 631 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Release-script constants.
# Fixes obfuscation damage: all four constants were assigned to the single
# name ``_A`` (each clobbering the previous one) while the functions below
# read them via the intended names.
PATH_TO_EXAMPLES = '''examples/'''
# pattern name -> (compiled regex locating the version line, replacement with
# the literal "VERSION" placeholder)
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
# pattern name -> file that carries the canonical version string
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def SCREAMING_SNAKE_CASE_(fname, version, pattern) -> None:
    """Rewrite the version string in ``fname`` using REPLACE_PATTERNS[pattern].

    Fixes obfuscation damage: the three parameters shared the name
    ``UpperCamelCase`` (a SyntaxError) and the regex/replacement pair was
    unpacked into a single repeated local.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the concrete version into the replacement template.
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def SCREAMING_SNAKE_CASE_(version) -> None:
    """Update the pinned version in every maintained example script.

    Fixes obfuscation damage: the single parameter doubled as both the walk
    root and the version. The walk root is the module constant
    PATH_TO_EXAMPLES (NOTE(review): mangled to ``_A`` at module top — confirm
    it is restored there).
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if """research_projects""" in directories:
            directories.remove("""research_projects""")
        if """legacy""" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                # NOTE(review): the helper's name was mangled; `update_version_in_file`
                # is the intended target, as in the original release script.
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def SCREAMING_SNAKE_CASE_(version, patch=False) -> None:
    """Update the version everywhere it is pinned (init, setup, examples).

    Fixes obfuscation damage: both parameters shared the name
    ``UpperCamelCase`` (a SyntaxError) and the helper calls received the same
    undefined name three times.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        # Patch releases don't touch the example pins.
        update_version_in_examples(version)
def SCREAMING_SNAKE_CASE_() -> None:
    """Rewrite the model-list links in the README from /main/ docs to the
    stable docs.

    Fixes obfuscation damage: the README path was read via an undefined name
    (restored to the module constant README_FILE — NOTE(review): mangled to
    ``_A`` at module top, confirm it is restored) and the edited line was
    discarded into a throwaway local instead of written back into ``lines``.
    """
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1

    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def SCREAMING_SNAKE_CASE_():
    """Read and parse the current version from the package ``__init__``.

    Fixes obfuscation damage: the file contents and the captured version were
    discarded into throwaway locals and re-read via undefined names.
    """
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def SCREAMING_SNAKE_CASE_(patch=False) -> None:
    """Interactive pre-release step: compute the next version, confirm with
    the user, and apply it everywhere.

    Fixes obfuscation damage: ``default_version`` and ``version`` were
    collapsed into throwaway locals while the body read the intended names.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version

    print(f'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """Move the version back to a ``.dev0`` tag after a release."""
    # First let's get the current version and derive the next dev version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # The transcription replaced the assignment targets with `_A`, leaving
    # `parser`/`args` (used below) undefined — restore them.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 631 | 1 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = grid.shape
lowerCamelCase__ : str = [-1, 1, 0, 0]
lowerCamelCase__ : Optional[int] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = [(0, source)], set()
lowerCamelCase__ : Optional[Any] = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[Any] = np.empty((rows, cols) , dtype=UpperCamelCase )
lowerCamelCase__ : str = None
while queue:
((lowerCamelCase__) , (lowerCamelCase__)) : Any = heappop(UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ , lowerCamelCase__ : Tuple = predecessors[x, y]
path.append(UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCamelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : int = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Dict = dist + 1
lowerCamelCase__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run any embedded doctests when the module is executed as a script.
    import doctest

    doctest.testmod()
| 631 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# NOTE(review): the assignment target was lost in transcription — `_A` is a
# placeholder that is never read below. Presumably this disabled a global
# backend flag (e.g. TF32 matmul) for deterministic training; confirm against
# the original file before relying on it.
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that one epoch of noise-prediction training is identical whether
    the noisy inputs come from a DDPM or a DDIM scheduler."""

    def get_model_optimizer(self, resolution=32):
        """Build a small UNet and an SGD optimizer with a fixed seed.

        Renamed from the transcription placeholder — the test below calls
        ``self.get_model_optimizer(resolution=32)``.
        """
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        """`add_noise` + one SGD step must agree between the two schedulers."""
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            # NOTE(review): this keyword's value was lost in transcription;
            # True matches the upstream test — confirm.
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,  # NOTE(review): see above
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # The last-step noisy inputs and predictions must match across schedulers.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 631 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient) -> tuple[float, float, float]:
    """Reflect a beam hitting the ellipse 4x^2 + y^2 = 100 at (point_x, point_y).

    Args:
        point_x, point_y: current impact point on the ellipse.
        incoming_gradient: gradient of the incoming beam.

    Returns:
        (next_x, next_y, outgoing_gradient): the next impact point and the
        reflected beam's gradient.
    """
    # Gradient of the normal at the impact point (ellipse y^2 + 4x^2 = 100).
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) expressed through the normal gradient.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point — keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord=1.4, first_y_coord=-9.6) -> int:
    """Count beam reflections inside the ellipse before it escapes (PE 144).

    The beam enters through the gap -0.01 <= x <= 0.01, y > 0; we bounce it
    with `next_point` until it exits through that gap again.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # The beam comes in from (0.0, 10.1) towards the first impact point.
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
    # Print the Project Euler 144 answer (number of beam reflections).
    print(F'{solution() = }')
| 631 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process) -> list:
    """Highest-Response-Ratio-Next scheduling: per-process turn-around times.

    Args:
        process_name: process labels (reordered internally by arrival).
        arrival_time: arrival times; NOTE: this list is sorted in place.
        burst_time: CPU burst per process.
        no_of_process: number of processes.

    Returns:
        Turn-around time of each process, indexed by arrival order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump to the first unfinished process; idle until it arrives if needed.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (wait + burst) / burst
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process) -> list:
    """Waiting time per process: turn-around time minus burst time.

    `process_name` is unused but kept for signature parity with the caller.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run; names restored from the call sites below (the transcription
    # collapsed all targets to `_A`).
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 631 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
# Emit INFO-level logs during conversion so progress is visible.
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) into `torch_layer` as Parameters.

    Asserts the shapes match before overwriting. Named `set_param` to match the
    call sites throughout this conversion script.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (query_key, value, output dense) into `torch_layer`."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (query, key, value, output dense) into `torch_layer`."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + feed-forward) into `torch_block`."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output — LSH attention ships 3 weight groups, local ships 4
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load the full trax weight tree into a `ReformerModelWithLMHead`."""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # Axial position embeddings come in as a tuple of per-axis weights.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # Each HF encoder layer corresponds to 4 consecutive trax weight entries.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a Reformer from `config_file`, load trax weights from the pickle,
    and save a PyTorch state dict to `pytorch_dump_path`.

    Named per the call in the `__main__` block below.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        # NOTE(review): pickle.load on an external checkpoint executes
        # arbitrary code — only run on trusted files.
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # The transcription replaced the targets with `_A`; `parser`/`args` are the
    # names actually used below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 631 |
"""Conditional public API for the VQ-Diffusion pipeline."""
from ...utils import is_torch_available, is_transformers_available

# The pipeline needs both optional backends; only import (and thus export) it
# when `transformers` and `torch` are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds small RobertaPreLayerNorm configs and inputs for the Flax tests.

    NOTE(review): the class/method names were transcription placeholders; they
    are restored from the call site below (``FlaxRobertaPreLayerNormModelTester(self)``)
    and the standard HF tester API.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for an encoder."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): value lost in transcription; False (encoder) — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Return decoder-mode config plus encoder hidden states and mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against RobertaPreLayerNorm.

    The base class is restored from the otherwise-unused `FlaxModelTesterMixin`
    import; the class name matches the "Copied from" marker above.
    """

    # NOTE(review): attribute name lost in transcription; `test_head_masking`
    # per the upstream Flax Roberta test — confirm.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading each class from the PyTorch checkpoint on the Hub."""
        for model_class_name in self.all_model_classes:
            # NOTE(review): `from_pt` value lost in transcription; True (the
            # checkpoint is PyTorch) — confirm.
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released checkpoint.

    NOTE(review): class/method names were transcription placeholders; these
    follow the standard HF naming — confirm against upstream.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        # `jnp.intaa` / `np.floataa` in the transcription are nonexistent
        # attributes; int32/float32 restored.
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 631 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs and the shared model checks.

    NOTE(review): names restored from the call sites (`ViTMAEModelTester(self)`
    in `setUp` and `self.model_tester.prepare_config_and_inputs*` in the test
    class below); same-named placeholder methods previously shadowed each other.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny ViTMAE."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            # NOTE(review): value lost in transcription; False (encoder) — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for ViTMAE.

    Bases restored from the `ModelTesterMixin` / `PipelineTesterMixin` imports
    above; the placeholder `a = ...` class attributes shadowed each other.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    # NOTE(review): the four boolean flag names were lost in transcription;
    # these follow the upstream ViTMAE test file — confirm.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Dict ):
pass
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
lowerCamelCase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Any = [*signature.parameters.keys()]
lowerCamelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = pt_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
lowerCamelCase__ : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
model.to(UpperCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
# Make sure we don't have nans
lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: Any ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Tuple ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: List[str] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: Tuple ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ : List[str] = ViTMAEConfig()
lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : str = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( _lowercase , unittest.TestCase ):
a = KandinskyVaaControlnetImgaImgPipeline
a = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
a = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
a = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a = False
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return 32
@property
def lowerCamelCase_ ( self: int ):
return 32
@property
def lowerCamelCase_ ( self: Dict ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self: int ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: Optional[Any] ):
return 100
@property
def lowerCamelCase_ ( self: str ):
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase__ : Optional[Any] = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self: Any ):
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = self.dummy_unet
lowerCamelCase__ : Any = self.dummy_movq
lowerCamelCase__ : str = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowerCamelCase__ : int = DDIMScheduler(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase_ ( self: int , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str]=0 ):
lowerCamelCase__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
lowerCamelCase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ : Tuple = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
lowerCamelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowerCamelCase__ : List[str] = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase__ : Optional[int] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = """cpu"""
lowerCamelCase__ : Any = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase__ : int = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCamelCase__ : Dict = output.images
lowerCamelCase__ : List[Any] = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : int = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
lowerCamelCase__ : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCamelCase__ : Optional[int] = init_image.resize((512, 512) )
lowerCamelCase__ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
lowerCamelCase__ : Tuple = torch.from_numpy(np.array(UpperCamelCase__ ) ).float() / 255.0
lowerCamelCase__ : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase__ : Dict = """A robot, 4k photo"""
lowerCamelCase__ : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
lowerCamelCase__ : Union[str, Any] = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = pipe_prior(
UpperCamelCase__ , image=UpperCamelCase__ , strength=0.85 , generator=UpperCamelCase__ , negative_prompt="""""" , ).to_tuple()
lowerCamelCase__ : int = pipeline(
image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , hint=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 631 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    """Read-only fsspec filesystem exposing one compressed file as if it were
    an archive containing a single uncompressed member.

    NOTE(review): the four class attributes are all named `a`, so each
    assignment shadows the previous one; upstream these were presumably the
    distinct attributes root_marker="" / protocol=None / compression=None /
    extension=None — confirm against the original source.
    """
    a = """"""
    a = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None # compression type in fsspec. ex: "gzip"
    a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        # NOTE(review): all parameters share the name `UpperCamelCase__` —
        # duplicate argument names are a SyntaxError in Python; judging from the
        # body, the intended signature was
        # (fo="", target_protocol=None, target_options=None, **kwargs).
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        # NOTE(review): the result is bound to an obfuscated local, but later
        # code reads `self.file` — presumably this was `self.file = fsspec.open(...)`.
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # basename of the remote path, without any chained-URL ("::") suffix
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        # uncompressed member name: the compressed basename minus its last extension
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        # lazily-populated directory cache (filled by the info-building method below)
        lowerCamelCase__ : Tuple = None

    # NOTE(review): the four methods below all share the name `lowerCamelCase_`,
    # so only the last definition survives on the class; upstream they were
    # presumably _strip_protocol / _get_dirs / cat / _open.
    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )

    def lowerCamelCase_ ( self: Tuple ):
        # Populate the directory cache with a single entry: the uncompressed member.
        if self.dir_cache is None:
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            # NOTE(review): `f` is undefined here — presumably the dict built on
            # the previous line, keyed by its "name" entry.
            lowerCamelCase__ : int = {f["""name"""]: f}

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        # cat-style helper: return the full decompressed payload as bytes.
        return self.file.open().read()

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        # _open-style helper: only binary ("rb") reads are supported.
        # NOTE(review): duplicate parameter names again; the intended signature
        # was presumably (path, mode="rb", block_size=None, autocommit=True,
        # cache_options=None, **kwargs).
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    """fsspec filesystem for a bzip2-compressed single file.

    NOTE(review): all three class attributes are named `a`, so only the last
    assignment survives; upstream these were presumably the distinct
    attributes protocol="bz2", compression="bz2", extension=".bz2" — confirm.
    """
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""
class _lowercase ( _lowercase ):
    """fsspec filesystem for a gzip-compressed single file.

    NOTE(review): all three class attributes are named `a`, so only the last
    assignment survives; upstream these were presumably the distinct
    attributes protocol="gzip", compression="gzip", extension=".gz" — confirm.
    """
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""
class _lowercase ( _lowercase ):
    """fsspec filesystem for an LZ4-compressed single file.

    NOTE(review): all three class attributes are named `a`, so only the last
    assignment survives; upstream these were presumably the distinct
    attributes protocol="lz4", compression="lz4", extension=".lz4" — confirm.
    """
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""
class _lowercase ( _lowercase ):
    """fsspec filesystem for an XZ-compressed single file.

    NOTE(review): all three class attributes are named `a`, so only the last
    assignment survives; upstream these were presumably the distinct
    attributes protocol="xz", compression="xz", extension=".xz" — confirm.
    """
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    """fsspec filesystem for a Zstandard-compressed single file.

    NOTE(review): the three class attributes are all named `a`, so only the
    last assignment survives; upstream these were presumably
    protocol="zstd", compression="zstd", extension=".zst" — confirm.
    """
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""

    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        # NOTE(review): all parameters share the name `UpperCamelCase__`
        # (duplicate argument names are a SyntaxError); the keyword names in
        # the super() call below show the intended signature:
        # (fo, mode="rb", target_protocol=None, target_options=None,
        #  block_size=DEFAULT_BLOCK_SIZE, **kwargs).
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__

        class _lowercase :
            # Proxy restoring a usable context-manager protocol around the
            # zstd reader; every other access is delegated to the wrapped file.
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                # NOTE(review): `file_` is undefined here — the parameter name
                # was obfuscated; presumably this set `self._file = file_`.
                lowerCamelCase__ : Optional[int] = file_

            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )

            def __iter__( self: Any ):
                return iter(self._file )

            def lowerCamelCase_ ( self: List[Any] ):
                # __next__ analogue, delegating to the wrapped file.
                return next(self._file )

            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                return getattr(self._file , UpperCamelCase__ )

        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            # NOTE(review): `WrappedFile` / `_enter` are the pre-obfuscation
            # names of the proxy class above and the saved __enter__ binding.
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )

        # Monkeypatch the opened file's __enter__ so `with fs.open(...)` yields
        # the proxy instead of the raw zstd reader.
        lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowercase ( unittest.TestCase ):
    """Tests for BlipProcessor: save/load round-trips and delegation of text
    and image inputs to the underlying BertTokenizer / BlipImageProcessor.

    NOTE(review): every method below is named `lowerCamelCase_`, so each
    definition shadows the previous one and only the last survives on the
    class; none start with `test_`, so unittest discovers nothing. Locals were
    obfuscated to `lowerCamelCase__` while later references keep the original
    names (`processor`, `self.tmpdirname`, `image_inputs`, ...), so the bodies
    are not runnable as written — retained byte-for-byte, documented only.
    """

    def lowerCamelCase_ ( self: Tuple ):
        # setUp analogue: write a BlipProcessor fixture into a temp directory.
        lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
        lowerCamelCase__ : int = BlipImageProcessor()
        lowerCamelCase__ : Tuple = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        lowerCamelCase__ : List[Any] = BlipProcessor(UpperCamelCase__ , UpperCamelCase__ )
        processor.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: str ):
        # helper: reload only the tokenizer from the fixture directory.
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer

    def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase__: Optional[Any] ):
        # helper: reload only the image processor from the fixture directory.
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor

    def lowerCamelCase_ ( self: List[str] ):
        # tearDown analogue: drop the fixture directory.
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self: Dict ):
        # helper: build one random 30x400 RGB PIL image as processor input.
        lowerCamelCase__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCamelCase__ : Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def lowerCamelCase_ ( self: Tuple ):
        # save/load round-trip with overridden kwargs keeps tokenizer vocab and
        # image-processor config intact.
        lowerCamelCase__ : Dict = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowerCamelCase__ : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
        lowerCamelCase__ : Any = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # processor(images=...) should match image_processor(...) feature-for-feature.
        lowerCamelCase__ : int = self.get_image_processor()
        lowerCamelCase__ : int = self.get_tokenizer()
        lowerCamelCase__ : int = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
        lowerCamelCase__ : List[Any] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
        lowerCamelCase__ : Dict = processor(images=UpperCamelCase__ , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def lowerCamelCase_ ( self: Tuple ):
        # processor(text=...) should match tokenizer(...) token-for-token.
        lowerCamelCase__ : Optional[Any] = self.get_image_processor()
        lowerCamelCase__ : List[str] = self.get_tokenizer()
        lowerCamelCase__ : Optional[Any] = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : str = """lower newer"""
        lowerCamelCase__ : List[str] = processor(text=UpperCamelCase__ )
        lowerCamelCase__ : str = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowerCamelCase_ ( self: int ):
        # combined text+image call returns the expected keys; a call with no
        # input at all must raise.
        lowerCamelCase__ : List[Any] = self.get_image_processor()
        lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
        lowerCamelCase__ : Union[str, Any] = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : str = """lower newer"""
        lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
        lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase__ ):
            processor()

    def lowerCamelCase_ ( self: List[Any] ):
        # batch_decode is forwarded straight to the tokenizer.
        lowerCamelCase__ : Tuple = self.get_image_processor()
        lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase__ : Optional[int] = processor.batch_decode(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # the processor's output keys are exactly the model input names.
        lowerCamelCase__ : List[Any] = self.get_image_processor()
        lowerCamelCase__ : Tuple = self.get_tokenizer()
        lowerCamelCase__ : Tuple = BlipProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
        lowerCamelCase__ : str = """lower newer"""
        lowerCamelCase__ : Tuple = self.prepare_image_inputs()
        lowerCamelCase__ : List[str] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 631 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
# Emit INFO-level progress logs from the transformers logging utilities while converting.
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)  # module-level logger (NOTE(review): later code calls `logger.error`, so this was presumably named `logger`)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> argparse.Namespace:
    """Load a YAML config file into a flat ``argparse.Namespace``.

    Nested mappings are flattened to dot-separated attribute names, e.g.
    ``model.classification.name`` — which is how the conversion code later
    reads them via ``getattr``.

    Args:
        UpperCamelCase: path to the YAML configuration file.

    Returns:
        ``argparse.Namespace`` with one attribute per flattened key. On a
        YAML parsing error the problem is logged and an (empty) namespace is
        returned.

    NOTE(review): the original body's nested helper reused one parameter name
    three times (a SyntaxError) and referenced undefined names (`d`,
    `parent_key`, `sep`, `config`, `flat_cfg`); restored to the evident intent.
    """
    config_path = UpperCamelCase
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dot-joined keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(config_path, """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config, k, v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path, str(exc ) ) )
    return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    # Build a MobileViTV2 config from a task name plus the original YAML config.
    #
    # NOTE(review): the two parameters share one name (duplicate argument
    # names are a SyntaxError); from the body they were presumably
    # (task_name, orig_config_path). Most `lowerCamelCase__` assignments below
    # repeatedly overwrite a dead local — upstream these were attribute writes
    # on the config object (num_labels, image_size, filename, width_multiplier,
    # hidden_act, output_stride, id2label, ...) — confirm against the original
    # conversion script before relying on this function.
    lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig()
    lowerCamelCase__ : str = False
    # dataset: pick label count, input resolution, and id2label file per task
    if task_name.startswith("""imagenet1k_""" ):
        lowerCamelCase__ : Optional[Any] = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : int = 384
        else:
            lowerCamelCase__ : Optional[int] = 256
        lowerCamelCase__ : str = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        lowerCamelCase__ : Tuple = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            lowerCamelCase__ : str = 384
        else:
            lowerCamelCase__ : Any = 256
        lowerCamelCase__ : int = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        lowerCamelCase__ : Dict = 151
        lowerCamelCase__ : str = 512
        lowerCamelCase__ : List[Any] = """ade20k-id2label.json"""
        lowerCamelCase__ : Union[str, Any] = True
    elif task_name.startswith("""voc_""" ):
        lowerCamelCase__ : Tuple = 21
        lowerCamelCase__ : Optional[int] = 512
        lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json"""
        lowerCamelCase__ : Tuple = True
    # orig_config: validate the checkpoint really is a mobilevit_v2 model
    lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase )
    assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        # segmentation-specific settings (deeplabv3 ASPP head parameters)
        lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label: fetch the label mapping from the huggingface/label-files dataset
    lowerCamelCase__ : Tuple = """huggingface/label-files"""
    lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
    lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
    lowerCamelCase__ : int = idalabel
    lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_ (dct , old_key , new_key ) -> None:
    """Rename *old_key* to *new_key* in *dct*, keeping the stored value.

    Used by the checkpoint converter to move state-dict weights to their new
    parameter names (see the (src, dest) pairs produced by the key-mapping
    function in this file).

    Args:
        dct: mutable mapping to modify in place.
        old_key: existing key to remove.
        new_key: key under which the value is re-inserted.

    Raises:
        KeyError: if *old_key* is not present.

    NOTE(review): the obfuscated original reused a single parameter name three
    times (a SyntaxError) and popped the value into a dead local without ever
    writing it back — restored to the intended in-place rename.
    """
    # pop + assign moves the value in one expression
    dct[new_key] = dct.pop(old_key )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple:
    # Build the list of (old_key, new_key) renames mapping original MobileViTV2
    # checkpoint names onto the transformers parameter layout.
    #
    # NOTE(review): the two parameters share one name (duplicate argument names
    # are a SyntaxError); from the body they were presumably
    # (state_dict, base_model=False). Locals were obfuscated to
    # `lowerCamelCase__` while references keep the original names
    # (`base_model`, `model_prefix`, `state_dict`, `k_new`, `j_in`,
    # `rename_keys`), so the body is not runnable as written — retained
    # byte-for-byte, documented only. Block structure/indentation reconstructed
    # from the reference conversion script — TODO confirm.
    if base_model:
        lowerCamelCase__ : Optional[int] = """"""
    else:
        # all renamed keys live under the "mobilevitv2." submodule
        lowerCamelCase__ : Optional[Any] = """mobilevitv2."""
    lowerCamelCase__ : List[str] = []
    for k in state_dict.keys():
        # drop a leading "encoder." before applying the renames
        if k[:8] == "encoder.":
            lowerCamelCase__ : Optional[Any] = k[8:]
        else:
            lowerCamelCase__ : Optional[Any] = k
        if ".block." in k:
            lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' )
        # stages 1-2: plain MobileNet-style layers
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        # stages 3-5: MobileViT blocks (downsampling + local representation)
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        # stages 3-5: global (transformer) representation; each stage has a
        # different number of transformer layers, with the final index being
        # the closing layernorm
        for i in [3, 4, 5]:
            if i == 3:
                lowerCamelCase__ : int = [0, 1]
            elif i == 4:
                lowerCamelCase__ : str = [0, 1, 2, 3]
            elif i == 5:
                lowerCamelCase__ : Dict = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    lowerCamelCase__ : List[Any] = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    lowerCamelCase__ : Optional[int] = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        # transformer sub-block internals
        if "pre_norm_attn.0." in k:
            lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        # heads (classification / segmentation)
        if "classifier.1." in k:
            lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> None:
    """Remove auxiliary segmentation-head weights from a state dict, in place.

    The MobileViTV2 segmentation checkpoints carry an auxiliary training head
    under ``seg_head.aux_head.`` that the transformers model does not use.

    Args:
        UpperCamelCase: the checkpoint ``state_dict`` (a mutable mapping),
            modified in place.

    NOTE(review): the obfuscated original bound the list to a dead local and
    then referenced the undefined names `state_dict`/`keys_to_ignore` —
    restored to the evident intent.
    """
    state_dict = UpperCamelCase
    # collect first, then pop, so we never mutate the dict while iterating it
    keys_to_ignore = [k for k in state_dict.keys() if k.startswith("""seg_head.aux_head.""" )]
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE_ ():
    """Download and return the standard COCO verification image as a PIL image.

    Used to sanity-check the converted model's outputs against known logits.

    Returns:
        The PIL image fetched from the COCO val2017 set.

    NOTE(review): the obfuscated original passed the undefined name
    `UpperCamelCase` as both the URL and the ``stream`` flag and bound the
    image to a dead local — restored to the evident intent.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    # stream=True so PIL can read straight from the response body
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ) -> Dict:
    """Convert an original MobileViTV2 checkpoint into the HuggingFace layout,
    verify its outputs on a sample image, and save model + image processor.

    Args mirror the CLI call order: task name, original ``.pt`` checkpoint
    path, original config path, output folder.

    NOTE(review): the obfuscated original declared four parameters sharing one
    name (a SyntaxError) and read locals that were never bound; the names below
    restore a runnable version with the same statement order. The helpers
    ``get_mobilevitva_config``/``remove_unused_keys``/``create_rename_keys``/
    ``rename_key``/``prepare_img`` are expected to be defined elsewhere in this
    file — confirm their names survived refactoring.
    """
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load huggingface model: segmentation head for ade20k/voc, classifier otherwise
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: convert one MobileViTV2 checkpoint selected by --task.
    # NOTE(review): the parser is bound to `_A`, yet `parser` is read below —
    # obfuscation artifact; `parser`/`args` are unbound here. Verify.
    _A : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
    Classification (ImageNet-1k)
        - MobileViTV2 (256x256) : imagenet1k_256
        - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
        - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
            imagenet21k_to_1k_256
        - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
            ImageNet-1k 384x384) : imagenet21k_to_1k_384
    Segmentation
        - ADE20K Dataset : ade20k_deeplabv3
        - Pascal VOC 2012 Dataset: voc_deeplabv3
    '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    # NOTE(review): parse result is bound to `_A` but read as `args`, and
    # `convert_mobilevitva_checkpoint` is not defined under that name in the
    # visible file (the converter def was renamed) — repair before running.
    _A : Dict =parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _lowercase ( unittest.TestCase ):
    """Model tester for Flax Roberta: builds small configs and random inputs.

    NOTE(review): obfuscation renamed all ``__init__`` parameters to one shared
    name (duplicate-argument SyntaxError) and all local targets to
    ``lowerCamelCase__`` — the names read on the right-hand sides (``parent``,
    ``batch_size``, …) and the ``self.*`` attributes read by the methods below
    are therefore unbound. Flagged for repair; code kept byte-identical here.
    """
    def __init__( self: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str=13 , UpperCamelCase__: List[Any]=7 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: str=True , UpperCamelCase__: Any=99 , UpperCamelCase__: int=32 , UpperCamelCase__: Optional[int]=5 , UpperCamelCase__: Any=4 , UpperCamelCase__: Tuple=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: Any=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: str=2 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: int=4 , ):
        # Stash tester hyper-parameters (hidden sizes, dropout, vocab, …).
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : List[str] = batch_size
        lowerCamelCase__ : List[str] = seq_length
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = use_attention_mask
        lowerCamelCase__ : Dict = use_token_type_ids
        lowerCamelCase__ : Any = use_labels
        lowerCamelCase__ : List[str] = vocab_size
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : int = num_hidden_layers
        lowerCamelCase__ : str = num_attention_heads
        lowerCamelCase__ : List[Any] = intermediate_size
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Dict = hidden_dropout_prob
        lowerCamelCase__ : int = attention_probs_dropout_prob
        lowerCamelCase__ : Tuple = max_position_embeddings
        lowerCamelCase__ : str = type_vocab_size
        lowerCamelCase__ : Optional[Any] = type_sequence_label_size
        lowerCamelCase__ : str = initializer_range
        lowerCamelCase__ : Dict = num_choices
    # Build (config, input_ids, token_type_ids, attention_mask) for one test.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : Tuple = None
        if self.use_attention_mask:
            lowerCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : str = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : str = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    # Same inputs re-packed into the kwargs dict the common test mixin expects.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
        lowerCamelCase__ : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    # Decoder variant: adds encoder hidden states / attention mask for
    # cross-attention tests.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = config_and_inputs
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class _lowercase ( _lowercase , unittest.TestCase ):
    """Common Flax model tests applied to every Flax Roberta head.

    NOTE(review): the four ``a = …`` statements below rebind one name —
    originally distinct class attributes (e.g. ``is_encoder_decoder`` flags and
    ``all_model_classes``); only the last survives. ``FlaxRobertaModelTester``
    is also not defined under that name in this file (the tester class above
    was renamed). Flagged for repair; code kept byte-identical.
    """
    a = True
    # Tuple of every Flax Roberta head exercised by the shared mixin tests.
    a = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def lowerCamelCase_ ( self: Dict ):
        # setUp: create the shared model tester.
        lowerCamelCase__ : Optional[Any] = FlaxRobertaModelTester(self )
    @slow
    def lowerCamelCase_ ( self: str ):
        # Smoke-test loading each head from the PyTorch hub checkpoint and
        # running a 1x1 dummy input through it.
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=UpperCamelCase__ )
            lowerCamelCase__ : Optional[Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
| 631 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    """Config tester hook: asserts MobileViTV2's config exposes its
    distinguishing ``width_multiplier`` attribute.

    NOTE(review): the base class ``_lowercase`` is not yet defined at this
    point in the file (originally this subclassed ``ConfigTester``), and
    ``UpperCamelCase__`` below is unbound (originally the ``config`` local).
    Flagged for repair; code kept byte-identical.
    """
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class _lowercase :
    """Model tester for MobileViTV2: builds tiny configs/inputs and checks the
    output shapes of the backbone, classifier, and segmentation heads.

    NOTE(review): obfuscation gave every ``__init__`` parameter one shared name
    (duplicate-argument SyntaxError) and redirected every assignment to
    ``lowerCamelCase__`` — the right-hand-side names (``parent``,
    ``width_multiplier``, …) and all ``self.*`` reads below are unbound.
    Flagged for repair; code kept byte-identical.
    """
    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        # last hidden size scales with the width multiplier, rounded to /8
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout
    # Random pixel values plus (optional) classification / per-pixel labels.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    # Tiny MobileViTV2 config mirroring the tester hyper-parameters.
    def lowerCamelCase_ ( self: List[Any] ):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    # Backbone: last hidden state is (batch, C, H/stride, W/stride).
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Classification head: logits are (batch, num_labels).
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Segmentation head: logits are (batch, num_labels, H/stride, W/stride),
    # with and without labels.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Re-pack inputs into the kwargs dict used by the common test mixin.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common + pipeline model tests applied to all MobileViTV2 heads.

    NOTE(review): each repeated ``a = …`` statement rebinds one name —
    originally distinct class attributes (``all_model_classes``,
    ``pipeline_model_mapping``, ``test_*`` flags); only the last ``False``
    survives. Kept byte-identical, flagged for repair.
    """
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    def lowerCamelCase_ ( self: Optional[int] ):
        # setUp: shared model tester + config tester.
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    def lowerCamelCase_ ( self: Tuple ):
        # forward() must accept `pixel_values` as its first argument.
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] ):
        # Hidden states: 5 stages, each halving the spatial resolution.
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Smoke-test loading the first published checkpoint.
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Load the COCO fixture image used by the integration tests below.

    The obfuscated original bound the opened image to a throwaway name and then
    returned an unbound ``image`` (NameError); this version binds and returns
    the opened ``PIL.Image``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests pinning MobileViTV2 outputs on real checkpoints."""
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        # Image processor matching the 1.0x ImageNet checkpoint (None if
        # vision deps are unavailable).
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase_ ( self: Any ):
        # Classification head: verify logits shape and first three values.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Segmentation head: verify logits shape and a 3x3x3 corner slice.
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        # Post-processing: with and without target_sizes, check output shapes.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that training one step sequence is deterministic across
    schedulers: the same seeded batches trained under DDPM and then DDIM
    noise-addition should produce matching losses.

    NOTE(review): obfuscation redirected result bindings to
    ``lowerCamelCase__`` — the names read later (``model``, ``optimizer``,
    ``loss``) and the final ``UpperCamelCase__`` comparisons are unbound
    (originally the collected DDPM vs DDIM loss tensors). Flagged for repair;
    code kept byte-identical.
    """
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        # Fresh seeded UNet + SGD optimizer for each scheduler run.
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Optional[Any] = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 631 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level constants for the GPT-NeoX-20B fast tokenizer.
# NOTE(review): obfuscation rebinds the single name `_A` four times — these
# were originally distinct constants (logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES), which
# the tokenizer class below still reads by their original names. Flag for
# repair; values kept byte-identical.
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
_A : List[Any] ={
    '''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (tokenizers-backed) GPT-NeoX-20B tokenizer.

    NOTE(review): the four ``a = …`` statements rebind one name — originally
    ``vocab_files_names`` / ``pretrained_vocab_files_map`` /
    ``max_model_input_sizes`` / ``model_input_names`` — and the right-hand
    constants are unbound here (bound to ``_A`` above). The base class name is
    also obfuscated (originally ``PreTrainedTokenizerFast``). Flagged for
    repair; code kept byte-identical.
    """
    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]
    def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
            lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
            lowerCamelCase__ : Dict = add_prefix_space
            lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
        lowerCamelCase__ : Dict = add_prefix_space
    def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
        # save_vocabulary: delegate to the backend model's save.
        lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
        # Build chat input ids: each turn followed by EOS, truncated from the
        # left to model_max_length.
        lowerCamelCase__ : str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
        if len(UpperCamelCase__ ) > self.model_max_length:
            lowerCamelCase__ : int = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _lowercase:
    """A simple dense matrix over numbers.

    Supports indexing with ``m[r, c]``, ``+``/``-``/unary ``-``, scalar and
    matrix ``*``, ``transpose`` and the Sherman-Morrison rank-1 inverse update.

    NOTE(review): the obfuscated original assigned every attribute and result
    cell to a throwaway local and defined all methods under one shared name;
    this reconstruction restores the names the class itself (and the demo
    below) reads: ``row``/``column``/``array``, ``validate_indicies``,
    ``transpose``, ``sherman_morrison``.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        """Render the matrix with right-aligned, equal-width cells."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest rendered element, for alignment.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff ``loc`` is a 2-element index inside the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        if not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, _lowercase)
        assert self.row == another.row and self.column == another.column
        result = _lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = _lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication for numbers, matrix product for matrices."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = _lowercase(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        if isinstance(another, _lowercase):  # Matrix multiplication
            assert self.column == another.row
            result = _lowercase(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        result = _lowercase(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Apply the Sherman-Morrison formula.

        If ``self`` is A^-1, return (A + u v^T)^-1 computed as
        A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u), or ``None`` when the
        denominator is zero (the update is not invertible).
        """
        assert isinstance(u, _lowercase) and isinstance(v, _lowercase)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Public alias restoring the name the rest of this module uses.
Matrix = _lowercase
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo: Sherman-Morrison update starting from the 3x3 identity.

        NOTE(review): reconstructed from the obfuscated original, which read
        unbound names (``ainv``, ``u``, ``v``) and called an undefined
        ``testa``; relies on the ``Matrix`` class defined above.
        """
        # a^(-1): start from the identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v: the rank-1 update column vectors
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 631 |
"""Lazy-import shim for the GIT model: exposes configuration, processing and
(torch-only) modeling symbols without importing them eagerly."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> list of public symbols it provides.
# (Dropped the previous `_A : Dict` annotation -- `Dict` was never imported,
# so evaluating the annotation raised NameError at import time.)
_A = {
    '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
    '''processing_git''': ['''GitProcessor'''],
}

# The modeling module requires torch; only advertise its symbols when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # FIX: add the modeling entries under their submodule key -- the previous
    # code rebound the whole variable to a bare list, clobbering the dict.
    _A['''modeling_git'''] = [
        '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GitForCausalLM''',
        '''GitModel''',
        '''GitPreTrainedModel''',
        '''GitVisionModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers (and TYPE_CHECKING-time consumers) see real imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # FIX: install the lazy proxy in sys.modules and hand it the structure
    # built above.  The previous code referenced the undefined name
    # `_import_structure` (NameError on import) and bound the proxy to a
    # throwaway variable instead of replacing this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for a MaMaaa (M2M100) seq2seq forward pass.

    Missing masks are derived from the ids: attention masks mark non-pad
    positions (``!= config.pad_token_id``); head masks default to all-ones of
    shape (layers, heads).

    FIXES vs. the previous version: the def declared every parameter with the
    same mangled name (a SyntaxError) and did not match its call sites, which
    invoke ``prepare_mam_aaa_inputs_dict``; ``torch.ones`` was given a
    non-device argument for ``device``; and the computed decoder mask was
    discarded (``"decoder_attention_mask"`` returned the encoder mask).
    """
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    # Head masks live on the same device as the ids they accompany.
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # FIX: use the decoder mask computed above (previously `attention_mask`,
        # leaving the decoder mask unused and wrong whenever padding differs).
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class _lowercase :
    """Builds MaMaaa (M2M100) configs and dummy seq2seq inputs for the tests below.

    NOTE(review): identifiers look machine-mangled -- every method is defined
    as ``lowerCamelCase_`` (later defs shadow earlier ones) and ``__init__``
    declares all parameters as ``UpperCamelCase__``, while the bodies read
    unmangled names.  Bodies are kept byte-identical; confirm names against
    the upstream ``MaMaaaModelTester``.
    """

    def __init__( self: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: int=13 , UpperCamelCase__: List[str]=7 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: Optional[Any]=99 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: List[str]=4 , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Any="relu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[str]=0.0 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: int=20 , UpperCamelCase__: Dict=2 , UpperCamelCase__: Optional[Any]=1 , UpperCamelCase__: List[Any]=0 , ):
        # Record the hyper-parameters consumed by get_config() and the input builders.
        lowerCamelCase__ : Tuple = parent
        lowerCamelCase__ : Any = batch_size
        lowerCamelCase__ : Dict = seq_length
        lowerCamelCase__ : Dict = is_training
        lowerCamelCase__ : Any = use_labels
        lowerCamelCase__ : Union[str, Any] = vocab_size
        lowerCamelCase__ : Any = hidden_size
        lowerCamelCase__ : List[Any] = num_hidden_layers
        lowerCamelCase__ : int = num_attention_heads
        lowerCamelCase__ : List[Any] = intermediate_size
        lowerCamelCase__ : Dict = hidden_act
        lowerCamelCase__ : List[Any] = hidden_dropout_prob
        lowerCamelCase__ : List[str] = attention_probs_dropout_prob
        lowerCamelCase__ : Dict = encoder_layerdrop
        lowerCamelCase__ : Optional[int] = decoder_layerdrop
        lowerCamelCase__ : Union[str, Any] = max_position_embeddings
        lowerCamelCase__ : Union[str, Any] = eos_token_id
        lowerCamelCase__ : List[str] = pad_token_id
        lowerCamelCase__ : Tuple = bos_token_id

    def lowerCamelCase_ ( self: List[Any] ):
        # Build random encoder/decoder ids plus the matching config and kwargs dict.
        lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : Tuple = self.eos_token_id  # Eos Token
        lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowerCamelCase__ : Dict = input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase__ : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase__ : str = self.get_config()
        lowerCamelCase__ : Dict = prepare_mam_aaa_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        return config, inputs_dict

    def lowerCamelCase_ ( self: Optional[int] ):
        # Assemble a MaMaaaConfig from the stored hyper-parameters.
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def lowerCamelCase_ ( self: List[str] ):
        # Thin wrapper kept for the common-test interface.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCamelCase_ ( self: int , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] ):
        # Check that decoding with cached past_key_values matches a full forward pass.
        lowerCamelCase__ : List[str] = MaMaaaModel(config=UpperCamelCase__ ).get_decoder().to(UpperCamelCase__ ).eval()
        lowerCamelCase__ : List[Any] = inputs_dict["""input_ids"""]
        lowerCamelCase__ : Union[str, Any] = inputs_dict["""attention_mask"""]
        lowerCamelCase__ : Optional[Any] = inputs_dict["""head_mask"""]
        # first forward pass
        lowerCamelCase__ : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase__ : Dict = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        lowerCamelCase__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase__ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        lowerCamelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )["""last_hidden_state"""]
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[
            """last_hidden_state"""
        ]
        # select random slice
        lowerCamelCase__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        # Check encoder/decoder can be saved and reloaded standalone with matching outputs.
        lowerCamelCase__ : Optional[Any] = MaMaaaModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
        lowerCamelCase__ : Tuple = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.encoder_last_hidden_state
        lowerCamelCase__ : Union[str, Any] = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : Optional[int] = model.get_encoder()
            encoder.save_pretrained(UpperCamelCase__ )
            lowerCamelCase__ : Tuple = MaMaaaEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : str = model.get_decoder()
            decoder.save_pretrained(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = MaMaaaDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = decoder(
            input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Common/Generation/Pipeline test-suite wiring for MaMaaa (M2M100) models.

    NOTE(review): class attributes are all mangled to ``a`` and methods to
    ``lowerCamelCase_`` -- later definitions shadow earlier ones.  Bodies are
    kept byte-identical; confirm names against the upstream test class.
    """

    # Model classes exercised by the shared ModelTester tests.
    a = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    # Generation-capable model classes.
    a = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    a = (
        {
            """conversational""": MaMaaaForConditionalGeneration,
            """feature-extraction""": MaMaaaModel,
            """summarization""": MaMaaaForConditionalGeneration,
            """text2text-generation""": MaMaaaForConditionalGeneration,
            """translation""": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    a = True
    a = True
    a = False
    a = False

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: int ):
        # Skip translation pipeline tests -- the tiny config has no src/tgt langs.
        # NOTE(review): `pipeline_test_casse_name` is presumably one of the
        # mangled parameters above -- confirm.
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def lowerCamelCase_ ( self: List[Any] ):
        # setUp: create the model tester and the config tester.
        lowerCamelCase__ : Tuple = MaMaaaModelTester(self )
        lowerCamelCase__ : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: str ):
        # Save/load round trip for every model class; nothing may be missing.
        lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ , lowerCamelCase__ : Tuple = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
            self.assertEqual(info["""missing_keys"""] , [] )

    def lowerCamelCase_ ( self: Tuple ):
        # Cached-decoding consistency check (delegates to the model tester).
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # Standalone encoder/decoder save-load check (delegates to the model tester).
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Forward pass driven by inputs_embeds instead of input_ids.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            lowerCamelCase__ : str = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            if not self.is_encoder_decoder:
                lowerCamelCase__ : List[Any] = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                lowerCamelCase__ : Tuple = inputs["""input_ids"""]
                lowerCamelCase__ : int = inputs.get("""decoder_input_ids""" , UpperCamelCase__ )
                del inputs["input_ids"]
                inputs.pop("""decoder_input_ids""" , UpperCamelCase__ )
            lowerCamelCase__ : Dict = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                lowerCamelCase__ : str = wte(UpperCamelCase__ )
            else:
                lowerCamelCase__ : List[Any] = wte(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = wte(UpperCamelCase__ )
            with torch.no_grad():
                model(**UpperCamelCase__ )[0]

    def lowerCamelCase_ ( self: Optional[int] ):
        # fp16 generate smoke test (half precision only on CUDA).
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] = input_dict["""input_ids"""]
        lowerCamelCase__ : Tuple = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = MaMaaaForConditionalGeneration(UpperCamelCase__ ).eval().to(UpperCamelCase__ )
        if torch_device == "cuda":
            model.half()
        model.generate(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        model.generate(num_beams=4 , do_sample=UpperCamelCase__ , early_stopping=UpperCamelCase__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """Return *tok_lst* as a ``torch.long`` tensor on the test device.

    FIXES vs. the previous version: the function was named
    ``SCREAMING_SNAKE_CASE_`` (shadowing the input-dict helper defined under
    the same mangled name and never matching its call sites, which invoke
    ``_long_tensor``), and the token list itself was passed as the ``device``
    argument.  ``torch_device`` is imported at the top of this file from
    ``transformers.testing_utils``.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Absolute tolerance for the allclose checks in the integration tests below
# (presumably threaded through as `atol`; the mangled references make this
# hard to confirm -- verify against the upstream test file).
_A : List[str] =1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests against the real facebook/m2m100_418M checkpoint.

    NOTE(review): method names are mangled to ``lowerCamelCase_`` (later defs
    shadow earlier ones); bodies are kept byte-identical.
    """

    @cached_property
    def lowerCamelCase_ ( self: Optional[int] ):
        # Tokenizer shared by the tests below (downloaded once, cached).
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )

    def lowerCamelCase_ ( self: Tuple ):
        # Base-model forward: check hidden-state shape and a frozen 3x3 slice.
        lowerCamelCase__ : Union[str, Any] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase__ : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase__ : List[str] = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase__ )[0]
        lowerCamelCase__ : List[str] = torch.Size((1, 11, 1_024) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        # change to expected output here
        lowerCamelCase__ : Any = torch.tensor(
            [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCamelCase__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )

    def lowerCamelCase_ ( self: Any ):
        # Conditional-generation head forward: check logits shape and a frozen slice.
        lowerCamelCase__ : str = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase__ )
        # change to intended input
        lowerCamelCase__ : int = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase__ : List[str] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase__ : str = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : List[str] = model(**UpperCamelCase__ )[0]
        lowerCamelCase__ : Union[str, Any] = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        # change to expected output here
        lowerCamelCase__ : int = torch.tensor(
            [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCamelCase__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )

    def lowerCamelCase_ ( self: int ):
        # fr->en beam-search generation must match the reference translations.
        lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
        lowerCamelCase__ : Any = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        lowerCamelCase__ : Dict = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""pt""" )
        lowerCamelCase__ : Union[str, Any] = model.generate(
            input_ids=dct["""input_ids"""].to(UpperCamelCase__ ) , attention_mask=dct["""attention_mask"""].to(UpperCamelCase__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
        lowerCamelCase__ : Union[str, Any] = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        lowerCamelCase__ : Any = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        assert generated == expected_en
| 631 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece model shipped with the test fixtures.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
# Minimal language-pair mapping -- presumably saved as the tokenizer config in
# setUp below (mangled references make this hard to confirm).
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
# A Marian ">>lang<<" target-language control token.
_A : int ='''>>zh<<'''
# Hub organization prefix for pretrained Marian checkpoints.
_A : Dict ='''Helsinki-NLP/'''

# Choose the framework tag passed as `return_tensors` in the tests below.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Tokenizer test suite for MarianTokenizer built on a tiny SentencePiece fixture.

    NOTE(review): class attributes are mangled to ``a`` and methods to
    ``lowerCamelCase_`` (later defs shadow earlier ones); globals such as
    ``ORG_NAME`` are referenced while only mangled ``_A`` bindings are visible
    above.  Bodies are kept byte-identical.
    """

    a = MarianTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: List[str] ):
        # setUp: write a 9-token vocab + SentencePiece files into tmpdirname and
        # save a MarianTokenizer built from them.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Tokenizer factory used by the shared TokenizerTesterMixin tests.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # (input text, expected text) pair for the shared round-trip tests.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # "</s>" maps to id 0 and back.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab ordering and size sanity checks.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )

    def lowerCamelCase_ ( self: int ):
        # vocab_size matches the 9-token fixture.
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def lowerCamelCase_ ( self: int ):
        # Real en-de checkpoint: tokenize, save, reload round trip.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Truncation caps padded batches at max length 512.
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Without truncation, padding goes to the longest sequence in the batch.
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Frozen expected encoding for a pinned Helsinki-NLP/opus-mt-en-de revision.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100,
        58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def lowerCamelCase_ ( self: List[str] ):
        # Checkpoint with separate source/target vocabs: encode both directions
        # and decode back.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module logger (not referenced in the visible portion of this file).
_A : Dict =logging.get_logger(__name__)

# Force deterministic kernels so the hard-coded tensor slices below reproduce.
enable_full_determinism()
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Shared model tests for a small 3-channel UNet2DModel with attention blocks.

    NOTE(review): class attributes/methods carry mangled names (``a``,
    ``lowerCamelCase_`` -- later defs shadow earlier ones); bodies are kept
    byte-identical.
    """

    a = UNetaDModel
    a = """sample"""

    @property
    def lowerCamelCase_ ( self: str ):
        # Random (batch=4, ch=3, 32, 32) sample plus a timestep tensor.
        lowerCamelCase__ : Union[str, Any] = 4
        lowerCamelCase__ : Union[str, Any] = 3
        lowerCamelCase__ : int = (32, 32)
        lowerCamelCase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Input shape (C, H, W) expected by the common tests.
        return (3, 32, 32)

    @property
    def lowerCamelCase_ ( self: int ):
        # Output shape (C, H, W) expected by the common tests.
        return (3, 32, 32)

    def lowerCamelCase_ ( self: Optional[int] ):
        # Small UNet config + matching dummy inputs for the common tests.
        lowerCamelCase__ : Dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        lowerCamelCase__ : List[Any] = self.dummy_input
        return init_dict, inputs_dict
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Shared model tests for a 4-channel UNet2DModel, plus checkpoint-loading checks.

    NOTE(review): class attributes/methods carry mangled names (``a``,
    ``lowerCamelCase_`` -- later defs shadow earlier ones); bodies are kept
    byte-identical.
    """

    a = UNetaDModel
    a = """sample"""

    @property
    def lowerCamelCase_ ( self: Optional[int] ):
        # Random (batch=4, ch=4, 32, 32) sample plus a timestep tensor.
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : str = 4
        lowerCamelCase__ : Any = (32, 32)
        lowerCamelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowerCamelCase_ ( self: List[str] ):
        # Input shape (C, H, W) expected by the common tests.
        return (4, 32, 32)

    @property
    def lowerCamelCase_ ( self: Dict ):
        # Output shape (C, H, W) expected by the common tests.
        return (4, 32, 32)

    def lowerCamelCase_ ( self: List[Any] ):
        # 4-channel UNet config + matching dummy inputs for the common tests.
        lowerCamelCase__ : Tuple = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        lowerCamelCase__ : str = self.dummy_input
        return init_dict, inputs_dict

    def lowerCamelCase_ ( self: List[str] ):
        # from_pretrained on a dummy checkpoint: no missing keys, forward works.
        lowerCamelCase__ , lowerCamelCase__ : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def lowerCamelCase_ ( self: List[str] ):
        # GPU-only forward smoke test.
        lowerCamelCase__ , lowerCamelCase__ : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def lowerCamelCase_ ( self: List[str] ):
        # An accelerate (low_cpu_mem_usage) load must produce the same outputs
        # as a normal load.
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        lowerCamelCase__ , lowerCamelCase__ : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
        model_accelerate.to(UpperCamelCase__ )
        model_accelerate.eval()
        lowerCamelCase__ : List[str] = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCamelCase__ : Tuple = noise.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        lowerCamelCase__ , lowerCamelCase__ : List[str] = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
        model_normal_load.to(UpperCamelCase__ )
        model_normal_load.eval()
        lowerCamelCase__ : Union[str, Any] = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
        assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )

    def lowerCamelCase_ ( self: Tuple ):
        # Deterministic forward against a frozen output slice.
        lowerCamelCase__ : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
        model.eval()
        model.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCamelCase__ : Dict = noise.to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        lowerCamelCase__ : int = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowerCamelCase__ : int = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 ) )
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = UNetaDModel
a = """sample"""
    @property
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Dict=(32, 32) ):
        # Dummy (sample, timestep) inputs for the NCSN++-style tests.
        # NOTE(review): a @property with an extra parameter cannot receive that
        # argument through attribute access, and `torch.intaa` looks like a
        # mangled `torch.int64` -- confirm against the original.
        lowerCamelCase__ : Dict = 4
        lowerCamelCase__ : Tuple = 3
        lowerCamelCase__ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self: int ):
return (3, 32, 32)
@property
def lowerCamelCase_ ( self: Dict ):
return (3, 32, 32)
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowerCamelCase__ : int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : str = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.dummy_input
lowerCamelCase__ : List[Any] = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = noise
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = 4
lowerCamelCase__ : str = 3
lowerCamelCase__ : Optional[int] = (256, 256)
lowerCamelCase__ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , UpperCamelCase__ ).sample
lowerCamelCase__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase__ : int = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(UpperCamelCase__ )
lowerCamelCase__ : int = 4
lowerCamelCase__ : int = 3
lowerCamelCase__ : Any = (32, 32)
lowerCamelCase__ : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , UpperCamelCase__ ).sample
lowerCamelCase__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase__ : Any = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def lowerCamelCase_ ( self: Union[str, Any] ):
# not required for this model
pass
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config.json.
# NOTE(review): both module globals had been renamed to ``_A`` (the second
# assignment clobbered the first); original conventional names restored.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class _lowercase ( _lowercase ):
    """RWKV model configuration (vocabulary, context length, width and depth).

    NOTE(review): the base class was renamed by the transform — presumably
    ``PretrainedConfig``.  The original ``__init__`` had every parameter
    renamed to the same ``UpperCamelCase__`` (a SyntaxError: duplicate
    argument) while the body still referenced the real names; the signature
    is restored to match the body's uses.
    """

    # Originally two distinct class attributes; both had been renamed ``a``.
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Store the hyper-parameters and forward token ids to the base config."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Reference-implementation defaults: attention width equals the hidden
        # width, and the FFN is 4x the hidden width.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

# Lazily-built map from device string id to `jaxlib.xla_extension.Device`.
# Kept at module level because Device objects are not picklable; the
# formatter's __init__ fills it in via `global DEVICE_MAPPING`.
DEVICE_MAPPING: Optional[dict] = None
class _lowercase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    """Formatter that converts Arrow tables into ``jax`` arrays.

    NOTE(review): method and attribute names restored to match the internal
    call sites (``self._map_devices_to_str()``, ``self._consolidate`` etc.)
    and the upstream ``datasets`` JaxFormatter; the renamed definitions all
    shared one name and shadowed each other.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert one leaf value (scalar / ndarray / PIL image) to a jax array."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config.json.
# NOTE(review): both globals had been renamed to ``_A``; conventional names restored.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class _lowercase ( _lowercase ):
    """RoCBert model configuration.

    NOTE(review): the base class was renamed by the transform — presumably
    ``PretrainedConfig``.  The original ``__init__`` signature had every
    parameter renamed to one duplicated name (a SyntaxError); it is restored
    here to match the names the body actually references.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store the BERT-style hyper-parameters plus the RoCBert-specific
        pronunciation/shape embedding settings."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_A : Any =TypeVar('''T''')
class _lowercase ( Generic[T] ):
def __init__( self: Optional[Any] , UpperCamelCase__: list[T] , UpperCamelCase__: Callable[[T, T], T] ):
lowerCamelCase__ : Any | T = None
lowerCamelCase__ : int = len(UpperCamelCase__ )
lowerCamelCase__ : list[T] = [any_type for _ in range(self.N )] + arr
lowerCamelCase__ : Any = fnc
self.build()
def lowerCamelCase_ ( self: List[Any] ):
for p in range(self.N - 1 , 0 , -1 ):
lowerCamelCase__ : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: T ):
p += self.N
lowerCamelCase__ : int = v
while p > 1:
lowerCamelCase__ : str = p // 2
lowerCamelCase__ : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int , UpperCamelCase__: int ): # noqa: E741
lowerCamelCase__ , lowerCamelCase__ : Dict = l + self.N, r + self.N
lowerCamelCase__ : T | None = None
while l <= r:
if l % 2 == 1:
lowerCamelCase__ : str = self.st[l] if res is None else self.fn(UpperCamelCase__ , self.st[l] )
if r % 2 == 0:
lowerCamelCase__ : List[Any] = self.st[r] if res is None else self.fn(UpperCamelCase__ , self.st[r] )
lowerCamelCase__ , lowerCamelCase__ : int = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    # Point updates applied one at a time after the first full check.
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Brute-force every inclusive range [i, j] and compare with the trees."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
| 631 |
'''simple docstring'''
import sys
import turtle
def get_mid(point1: tuple[float, float], point2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment from ``point1`` to ``point2``.

    Renamed back to ``get_mid`` to match the call sites in ``triangle`` below;
    the transform had collapsed both parameters into one duplicated name,
    which is a SyntaxError.
    """
    return (point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2
def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle with the module-level ``my_pen`` turtle, then recurse
    into the three corner sub-triangles until ``depth`` reaches 0.

    Renamed back to ``triangle`` to match the recursive calls and the
    ``__main__`` invocation; the four parameters had been collapsed into one
    duplicated name (SyntaxError) and all digits garbled to ``a``.
    """
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    # NOTE(review): the original vertex/midpoint pairings were lost in the
    # rename; these follow the upstream Sierpinski implementation — confirm.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex3, vertex1), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # ``my_pen`` must keep this name: ``triangle`` above draws with it.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 1 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Renamed back to ``solution`` to match the ``__main__`` call below.

    Raises:
        TypeError: if ``n`` is not an int and cannot be cast to one.
        ValueError: if ``n`` is less than one.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Smaller factors have already been divided out, so the next divisor
        # found here is necessarily prime.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # Report the answer for the default Project Euler #3 input.
    result = solution()
    print(f"solution() = {result}")
| 631 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs and runs shape checks for each head.

    Renamed back to ``TFConvBertModelTester`` and the methods to the names
    the test class below actually calls (``prepare_config_and_inputs``,
    ``create_and_check_model``, ...); the transform had given every method
    the same name, so only the last definition survived.

    NOTE(review): as in the upstream tester, ``__init__`` ignores most of its
    keyword arguments and overrides them with hard-coded values.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # NOTE(review): attribute names below are not referenced in this view;
        # they follow the upstream ConvBERT tester — confirm.
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny ConvBERT config plus random ids, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Forward the base model with dict and list inputs; check output shape."""
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile each input along a new choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a = False
a = False
a = False
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Dict = TFConvBertModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Tuple = True
if hasattr(UpperCamelCase__ , """use_cache""" ):
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase__ : Any = model(UpperCamelCase__ )
if self.is_encoder_decoder:
lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
else:
lowerCamelCase__ : int = outputs["""hidden_states"""]
lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] ):
        """Check attention/hidden-state outputs for every model class, both via
        call-time flags and via config flags.

        NOTE(review): as written, every intermediate is assigned to the same
        throwaway local (``lowerCamelCase__``) while later lines read other
        names (``outputs``, ``out_len``, ``config`` …) and undefined
        ``UpperCamelCase__`` placeholders are passed around — this body cannot
        run as-is and appears to be mechanically obfuscated. Kept byte-identical.
        """
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        # Verifies decoder attention count and per-head shape.
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        # Verifies encoder (or plain) attention count and per-head shape.
        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        """Integration check: run a tiny batch through the pretrained ConvBERT
        checkpoint and compare the first logits against recorded values."""
        # Fix: bind each intermediate to the name the following lines read;
        # previously every value went into one throwaway local and the reads
        # below referenced undefined names (NameError at runtime).
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
_A : List[str] ='''Muhammad Umer Farooq'''
_A : List[Any] ='''MIT'''
_A : Union[str, Any] ='''1.0.0'''
_A : str ='''Muhammad Umer Farooq'''
_A : Tuple ='''contact@muhammadumerfarooq.me'''
_A : Union[str, Any] ='''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _lowercase ( _lowercase ):
    """HTML parser that collects absolute URLs found in anchor tags."""
    def __init__( self: Optional[Any] , UpperCamelCase__: str ):
        super().__init__()
        # Fix: store state on the instance; the old code assigned to discarded
        # locals, so later reads of self.urls / self.domain raised
        # AttributeError (and it read an undefined name ``domain``).
        self.urls: list[str] = []
        self.domain: str = UpperCamelCase__
    # Start-tag hook: record each new href, resolved against the base domain.
    # Fix: the original def repeated the parameter name for both arguments
    # (a SyntaxError) and appended an undefined name instead of the joined URL.
    def lowerCamelCase_ ( self: int , tag: str , attrs: list[tuple[str, str | None]] ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    """Return the registrable domain — the last two dot-separated labels of
    the URL's host (e.g. ``https://a.b.example.com`` -> ``example.com``)."""
    # NOTE(review): relies on a module-level ``get_sub_domain_name`` helper;
    # confirm that name is actually in scope in this module.
    labels = get_sub_domain_name(UpperCamelCase ).split(""".""" )
    return ".".join(labels[-2:] )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    """Return the network-location (host[:port]) component of the given URL."""
    parsed = parse.urlparse(UpperCamelCase )
    return parsed.netloc
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "https://github.com" ) -> list[str]:
    """Crawl the start URL, follow the links found on that page, and return a
    sorted, de-duplicated list of e-mail addresses on the start URL's domain."""
    domain = get_domain_name(UpperCamelCase )
    # Initialize the parser
    parser = Parser(UpperCamelCase )
    try:
        # Open URL
        r = requests.get(UpperCamelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            try:
                # Fix: fetch each discovered link; the old code re-fetched the
                # start URL on every iteration, so other pages were never read.
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    # Fix: collect the matched address — the old code added the
                    # start URL itself once per match.
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    # Fix: bind the result to ``emails`` (read by the prints below) and call
    # the crawler by the name it is actually defined under in this module;
    # the old code called an undefined ``emails_from_url`` and discarded the
    # return value, so both prints would have failed.
    emails = SCREAMING_SNAKE_CASE_('''https://github.com''')
    print(F'{len(emails)} emails found:')
    print('''\n'''.join(sorted(emails)))
| 631 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 1 |
'''simple docstring'''
import heapq
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> set[int]:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex.

    ``UpperCamelCase`` maps each vertex to its adjacency list; returns the
    chosen cover as a set of vertices.
    """
    # Fix: the original bound every value to one discarded local while the
    # code below read ``queue`` / ``chosen_vertices`` / ``argmax`` / ``index``
    # (NameError), pushed onto the graph dict itself, and ranked each node by
    # the size of the whole graph instead of its adjacency list.
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in UpperCamelCase.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# Fix: bind the demo graph to the name the f-string reads, and call the cover
# function by the name it is actually defined under in this module; the old
# code read undefined ``graph`` / ``greedy_min_vertex_cover`` (NameError).
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{SCREAMING_SNAKE_CASE_(graph)}')
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : Dict ={
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """Configuration for a TrOCR-style decoder.

    Stores decoder hyper-parameters and maps generic attribute names
    (``num_attention_heads`` …) onto the decoder-specific ones.
    """
    # Fix: the original bound all three class attributes to the same name
    # (only the last assignment survived) and repeated one parameter name for
    # every ``__init__`` argument — a SyntaxError. Restored the distinct
    # names that the method body and the config base class read.
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__( self , vocab_size=50_265 , d_model=1_024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4_096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Token-id bookkeeping is delegated to the base config class.
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 631 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """patch_submodule must patch every alias of os/os.path/join and restore them."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
    # Fix: bind the sentinel to ``mock`` (compared against below) and pass it
    # to patch_submodule; the old code assigned it to a discarded local and
    # passed an undefined name (NameError).
    mock = """__test_patch_submodule_mock__"""
    with patch_submodule(_test_patching , """os.path.join""" , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def SCREAMING_SNAKE_CASE_ () -> Dict:
    """Builtins such as ``open`` can be patched inside the target module."""
    assert _test_patching.open is open
    # Fix: bind the sentinel to ``mock`` and pass it to patch_submodule; the
    # old code discarded it and passed an undefined name.
    mock = """__test_patch_submodule_builtin_mock__"""
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , """open""" , mock ):
        assert _test_patching.open is mock
    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Patching an attribute that is absent from the target module is a no-op."""
    # pandas.read_csv is not present in _test_patching
    # Fix: bind the sentinel and pass it through (was an undefined name).
    mock = """__test_patch_submodule_missing_mock__"""
    with patch_submodule(_test_patching , """pandas.read_csv""" , mock ):
        pass
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
    """A builtin absent from the module globals is still mocked and restored."""
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    # Fix: bind the sentinel to ``mock``; use ``None`` as the getattr default
    # (both were undefined placeholder names before).
    mock = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , None ) is None
    with patch_submodule(_test_patching , """len""" , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """patch_submodule also works via explicit start()/stop() calls."""
    # Fix: bind the sentinel and the patch object to the names used below;
    # the old code discarded both and then read undefined names.
    mock = """__test_patch_submodule_start_and_stop_mock__"""
    patch = patch_submodule(_test_patching , """open""" , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ () -> str:
    """Nested patches of sibling attributes compose in any order and unwind."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    # Fix: bind the three sentinels to the names the asserts compare against,
    # and pass the matching sentinel to each patch_submodule call; the old
    # code discarded the sentinels and passed undefined names.
    mock_join = """__test_patch_submodule_successive_join__"""
    mock_dirname = """__test_patch_submodule_successive_dirname__"""
    mock_rename = """__test_patch_submodule_successive_rename__"""
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
        with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
        with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def SCREAMING_SNAKE_CASE_ () -> Any:
    """Patching a dotted path that does not exist at all must not raise."""
    # Fix: bind the sentinel and pass it through (was an undefined name).
    mock = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , mock ):
        pass
    with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , mock ):
        pass
| 631 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
    """Return True iff the adjacency-list graph ``UpperCamelCase`` is bipartite,
    using a 2-coloring DFS over every connected component."""
    # Fix: the original assigned ``visited``/``color`` to discarded locals,
    # and the inner ``dfs`` repeated one parameter name (a SyntaxError) while
    # its body read the intended ``v``/``c``/``graph`` names.
    visited = [False] * len(UpperCamelCase )
    color = [-1] * len(UpperCamelCase )
    def dfs(v , c ):
        # Color vertex v with c, then alternate colors on its neighbours.
        visited[v] = True
        color[v] = c
        for u in UpperCamelCase[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(UpperCamelCase ) ):
        if not visited[i]:
            dfs(i , 0 )
    # A graph is bipartite iff no edge joins two same-colored vertices.
    for i in range(len(UpperCamelCase ) ):
        for j in UpperCamelCase[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# Fix: bind the demo graph to the name the call below reads, and call the
# checker by the name it is actually defined under in this module; the old
# code read undefined ``graph`` / ``check_bipartite_dfs`` (NameError).
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(SCREAMING_SNAKE_CASE_(graph))
| 631 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _lowercase :
    """Builds tiny OpenLlama configs/inputs and per-head checks for the tests below.

    NOTE(review): several ``def``s in this class repeat the parameter name
    ``UpperCamelCase__`` (a SyntaxError in Python), and every intermediate is
    bound to one reused throwaway local while later lines read other names —
    this class appears mechanically obfuscated and cannot run as written.
    Kept byte-identical apart from these comments.
    """
    def __init__( self: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple=13 , UpperCamelCase__: List[str]=7 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: Union[str, Any]=False , UpperCamelCase__: str=True , UpperCamelCase__: List[str]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: str=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Any=37 , UpperCamelCase__: Dict="gelu" , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: int=512 , UpperCamelCase__: Any=16 , UpperCamelCase__: Any=2 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Any=3 , UpperCamelCase__: Union[str, Any]=4 , UpperCamelCase__: List[Any]=None , ):
        lowerCamelCase__ : List[Any] = parent
        lowerCamelCase__ : Tuple = batch_size
        lowerCamelCase__ : Dict = seq_length
        lowerCamelCase__ : List[Any] = is_training
        lowerCamelCase__ : Any = use_input_mask
        lowerCamelCase__ : List[Any] = use_token_type_ids
        lowerCamelCase__ : str = use_labels
        lowerCamelCase__ : Optional[int] = vocab_size
        lowerCamelCase__ : str = hidden_size
        lowerCamelCase__ : List[Any] = num_hidden_layers
        lowerCamelCase__ : List[str] = num_attention_heads
        lowerCamelCase__ : int = intermediate_size
        lowerCamelCase__ : Optional[int] = hidden_act
        lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
        lowerCamelCase__ : Dict = attention_probs_dropout_prob
        lowerCamelCase__ : str = max_position_embeddings
        lowerCamelCase__ : Any = type_vocab_size
        lowerCamelCase__ : int = type_sequence_label_size
        lowerCamelCase__ : Union[str, Any] = initializer_range
        lowerCamelCase__ : Optional[int] = num_labels
        lowerCamelCase__ : Any = num_choices
        lowerCamelCase__ : Optional[int] = scope
    # Builds random input ids/masks/labels plus a config for one test batch.
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : Any = None
        if self.use_input_mask:
            lowerCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : int = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : Dict = None
        lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : Tuple = None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Constructs the small OpenLlamaConfig used by every check.
    def lowerCamelCase_ ( self: Any ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
    # Shape-checks OpenLlamaModel's last_hidden_state.
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Optional[Any] = OpenLlamaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Same model exercised as a decoder with encoder hidden states / mask.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] , ):
        lowerCamelCase__ : int = True
        lowerCamelCase__ : List[str] = OpenLlamaModel(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
        lowerCamelCase__ : Any = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
        lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Checks causal-LM logits shape.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , ):
        lowerCamelCase__ : List[Any] = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Checks past_key_values caching reproduces the uncached hidden states.
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , ):
        lowerCamelCase__ : Tuple = True
        lowerCamelCase__ : Optional[int] = True
        lowerCamelCase__ : List[str] = OpenLlamaForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # first forward pass
        lowerCamelCase__ : Union[str, Any] = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
        lowerCamelCase__ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase__ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowerCamelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
        lowerCamelCase__ : int = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        lowerCamelCase__ : int = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
        # select random slice
        lowerCamelCase__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase__ : str = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
    # Repackages prepared inputs into the (config, inputs_dict) pair.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : int = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Dict = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Model/generation/pipeline test suite for the OpenLlama architecture.

    NOTE(review): every class attribute below is bound to the same name ``a``
    (only the last assignment survives), and the method bodies bind results to
    one reused throwaway local while later lines read other names — this class
    appears mechanically obfuscated and cannot run as written. Kept
    byte-identical apart from these comments.
    """
    a = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    a = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    a = (
        {
            """feature-extraction""": OpenLlamaModel,
            """text-classification""": OpenLlamaForSequenceClassification,
            """text-generation""": OpenLlamaForCausalLM,
            """zero-shot""": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    # Creates the shared tester and config tester fixtures.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : List[str] = OpenLlamaModelTester(self )
        lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    # Runs the generic config sanity checks.
    def lowerCamelCase_ ( self: List[Any] ):
        self.config_tester.run_common_tests()
    # Basic forward-pass shape check.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Repeats the model check for each position-embedding variant.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase__ : Any = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Sequence-classification head, regression-style labels.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : List[Any] = 3
        lowerCamelCase__ : Any = input_dict["""input_ids"""]
        lowerCamelCase__ : List[Any] = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : Tuple = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    # Sequence-classification head, single-label problem type.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str = 3
        lowerCamelCase__ : Optional[int] = """single_label_classification"""
        lowerCamelCase__ : List[str] = input_dict["""input_ids"""]
        lowerCamelCase__ : List[str] = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase__ : Optional[int] = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    # Sequence-classification head, multi-label problem type.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str = 3
        lowerCamelCase__ : str = """multi_label_classification"""
        lowerCamelCase__ : Any = input_dict["""input_ids"""]
        lowerCamelCase__ : Optional[Any] = input_ids.ne(1 ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase__ : List[Any] = OpenLlamaForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def lowerCamelCase_ ( self: Any ):
        pass
    # RoPE scaling: short inputs match only for dynamic scaling; long differ.
    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Any ):
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
        lowerCamelCase__ : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ : Union[str, Any] = OpenLlamaModel(UpperCamelCase__ )
        original_model.to(UpperCamelCase__ )
        original_model.eval()
        lowerCamelCase__ : List[Any] = original_model(UpperCamelCase__ ).last_hidden_state
        lowerCamelCase__ : int = original_model(UpperCamelCase__ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
        lowerCamelCase__ : Any = OpenLlamaModel(UpperCamelCase__ )
        scaled_model.to(UpperCamelCase__ )
        scaled_model.eval()
        lowerCamelCase__ : Union[str, Any] = scaled_model(UpperCamelCase__ ).last_hidden_state
        lowerCamelCase__ : Any = scaled_model(UpperCamelCase__ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 631 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Map-style dataset that applies a preprocessing callable to each item."""
    # Fix: ``__init__`` repeated one parameter name for all three arguments
    # (a SyntaxError) and assigned to discarded locals, so ``__len__`` /
    # ``__getitem__`` raised AttributeError; ``__getitem__`` also indexed with
    # an undefined ``i`` and discarded both intermediates.
    def __init__( self: Optional[Any] , dataset: Any , process: List[Any] , params: Optional[int] ):
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self: List[str] ):
        return len(self.dataset )
    def __getitem__( self: Any , UpperCamelCase__: int ):
        item = self.dataset[UpperCamelCase__]
        processed = self.process(item , **self.params )
        return processed
class _lowercase ( _lowercase ):
    """Iterator that pulls items from a loader, runs ``infer`` on each, and
    optionally unrolls batched outputs into batch_size-1 items.

    Fixes vs. the mangled original: ``__init__`` declared one parameter name
    four times (a SyntaxError) and never stored the batch-unrolling
    bookkeeping on ``self``; the two advance methods were both renamed to the
    same placeholder, clobbering ``loader_batch_item`` (which ``__next__`` and
    a subclass in this file call) and losing the ``__next__`` hook entirely;
    an ``isinstance`` call referenced undefined names instead of
    ``(element, ModelOutput)``.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for unrolling one inferred batch item by item.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader )

    def __iter__(self):
        self.iterator = iter(self.loader )
        return self

    def loader_batch_item(self):
        """Return the item at ``self._loader_batch_index`` of the current
        batch, re-wrapped so it looks like a batch of size 1."""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens generators produced by ``infer``: each item from
    ``loader`` yields a sub-iterator whose elements are emitted one by one.

    Fixes vs. the mangled original: ``__init__`` declared one parameter name
    four times (a SyntaxError), the iterator/subiterator state was collapsed
    into a single local instead of being stored on ``self``, and the advance
    method was not named ``__next__`` so the object broke the iterator
    protocol.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader , infer , params )

    def __iter__(self):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator None means we haven't started unrolling the first item yet.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened pipeline items back into their original
    ``process`` boundaries, accumulating items until one carries ``is_last``.

    Fix vs. the mangled original: the advance method was not named
    ``__next__`` (so the object broke the iterator protocol) and its collapsed
    locals no longer matched the names the body read.
    """

    def __iter__(self):
        self.iterator = iter(self.loader )
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset view that extracts a single named field from each item.

    Fixes vs. the mangled original: ``__init__`` declared the same parameter
    name twice (a SyntaxError) and ``__getitem__`` indexed with an undefined
    name instead of its argument.
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset )

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset view pairing two named fields of each item into the
    ``{"text": ..., "text_pair": ...}`` shape tokenizers expect.

    Fixes vs. the mangled original: ``__init__`` declared the same parameter
    name three times (a SyntaxError), and both output fields read the same
    key — restore two distinct keys.
    """

    def __init__(self, dataset: Dataset, keya: str, keyb: str):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__(self):
        return len(self.dataset )

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ (n , k ) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in
    lexicographic order.

    Fixes vs. the mangled original: the signature declared the same parameter
    name twice (a SyntaxError) and delegated to a helper whose name no longer
    exists in this file; the stdlib provides the same enumeration directly.
    """
    from itertools import combinations  # local import keeps the fix self-contained

    return [list(combo ) for combo in combinations(range(1 , n + 1 ) , k )]
def SCREAMING_SNAKE_CASE_ (increment , total_number , level , current_list , total_list ) -> None:
    """Recursively build every ``level``-element ascending combination drawn
    from ``increment..total_number`` and append each (as a copy) to
    ``total_list``.

    Fixes vs. the mangled original: the signature declared one parameter name
    five times (a SyntaxError), the loop appended the wrong value, and the
    recursion targeted a name that is not defined in this file.
    """
    if level == 0:
        # Completed one combination; copy it so later pops don't mutate it.
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        SCREAMING_SNAKE_CASE_(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> None:
    """Print each combination in ``UpperCamelCase`` on its own line, elements
    separated by spaces.

    Fixes vs. the mangled original: the body iterated an undefined global
    ``total_list`` and printed the whole argument instead of the current row.
    """
    for row in UpperCamelCase:
        print(*row )
if __name__ == "__main__":
    # Demo driver: enumerate all 2-element combinations of 1..4 and print them.
    # NOTE(review): `generate_all_combinations`, `n`, `k`, `print_all_state`
    # and `total_list` are not defined under those names in this file — the
    # defs above were renamed by obfuscation; verify against the original
    # script before running.
    _A : int =4
    _A : List[Any] =2
    _A : int =generate_all_combinations(n, k)
    print_all_state(total_list)
| 631 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# Diagnostic script: print Python/OS/torch/transformers environment details
# (the kind of information attached to bug reports).
# NOTE(review): the bare assignment below presumably set an environment
# variable (e.g. os.environ[...] = "3") before the file was mangled — confirm
# against upstream.
_A : Dict ='''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    # torch not installed — report None instead of failing.
    print('''Torch version:''', None)
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_A : Union[str, Any] =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ):
    """Infer the model family ("rag_token", "rag_sequence" or "bart") from a
    checkpoint name/path substring; return None when nothing matches.

    Fix vs. the mangled original: the body tested an undefined global
    ``model_name_or_path`` instead of its own argument.
    """
    if "token" in UpperCamelCase:
        return "rag_token"
    if "sequence" in UpperCamelCase:
        return "rag_sequence"
    if "bart" in UpperCamelCase:
        return "bart"
    return None
def SCREAMING_SNAKE_CASE_ (metric_fn , prediction , ground_truths ):
    """Return the best ``metric_fn(prediction, gt)`` over all reference
    answers — the standard SQuAD-style max-over-references scoring.

    Fix vs. the mangled original: the signature declared one parameter name
    three times (a SyntaxError) and the metric compared the prediction
    against itself rather than against each ground truth.
    """
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE_ (args , preds_path , gold_data_path ):
    """Compute and log corpus-level EM and F1 of predictions vs. gold answers.

    ``args.gold_data_mode == "qa"`` reads a tab-separated file whose second
    column is a literal answer list; ``"ans"`` reads one expected answer per
    line. Relies on module-level ``pd``, ``ast``, ``logger`` and the metric
    functions imported from ``utils_rag``.

    Fix vs. the mangled original: the signature declared one parameter name
    three times (a SyntaxError) and the accumulator/loop locals were
    collapsed; the max-over-references step is inlined here so this function
    does not depend on the (renamed) helper.
    """
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="""\t""" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        # Max over references, inlined (see docstring).
        em += max(exact_match_score(prediction , gt ) for gt in ground_truths )
        fa += max(fa_score(prediction , gt ) for gt in ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f'''F1: {fa:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def SCREAMING_SNAKE_CASE_ (args , preds_path , gold_data_path ):
    """Compute and log precision@k between predicted and gold provenance:
    each line holds tab-separated document titles; score is the mean overlap
    of the top-``args.k`` predictions with the references, scaled to 0-100.

    Fix vs. the mangled original: the signature declared one parameter name
    three times (a SyntaxError) and the hypo/reference locals were collapsed.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
# Retrieval-mode evaluation: encode a batch of questions with the RAG question
# encoder, query the retriever, and return one tab-joined string of retrieved
# document titles per question.
# NOTE(review): obfuscation damaged this function — the signature repeats one
# parameter name (a SyntaxError as written), `strip_title` reads `title`
# rather than its argument, and several locals (`questions`, `result`,
# `all_docs`, `provenance_strings`) no longer match any assignment. Restore
# from upstream `eval_rag.py` before running.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
    def strip_title(UpperCamelCase ):
        # Drop surrounding double quotes from a retrieved document title.
        if title.startswith("""\"""" ):
            lowerCamelCase__ : Tuple = title[1:]
        if title.endswith("""\"""" ):
            lowerCamelCase__ : Any = title[:-1]
        return title
    lowerCamelCase__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase , truncation=UpperCamelCase , )["""input_ids"""].to(args.device )
    lowerCamelCase__ : Optional[Any] = rag_model.rag.question_encoder(UpperCamelCase )
    lowerCamelCase__ : Optional[Any] = question_enc_outputs[0]
    lowerCamelCase__ : List[str] = rag_model.retriever(
        UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    lowerCamelCase__ : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    lowerCamelCase__ : List[Any] = []
    for docs in all_docs:
        lowerCamelCase__ : Optional[int] = [strip_title(UpperCamelCase ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(UpperCamelCase ) )
    return provenance_strings
# E2E-mode evaluation: tokenize a batch of questions, generate answers with
# the RAG model under no_grad, and return the decoded answer strings.
# NOTE(review): the signature repeats one parameter name (a SyntaxError as
# written) and the locals (`questions`, `inputs_dict`, `answers`, padding /
# truncation flags) were collapsed by obfuscation — restore from upstream
# `eval_rag.py` before running.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
    with torch.no_grad():
        lowerCamelCase__ : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase , truncation=UpperCamelCase )
        lowerCamelCase__ : Tuple = inputs_dict.input_ids.to(args.device )
        lowerCamelCase__ : Tuple = inputs_dict.attention_mask.to(args.device )
        lowerCamelCase__ : Dict = rag_model.generate( # rag_model overwrites generate
            UpperCamelCase , attention_mask=UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        lowerCamelCase__ : List[Any] = rag_model.retriever.generator_tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
        if args.print_predictions:
            for q, a in zip(UpperCamelCase , UpperCamelCase ):
                logger.info("""Q: {} - A: {}""".format(UpperCamelCase , UpperCamelCase ) )
        return answers
def SCREAMING_SNAKE_CASE_ ():
    """Build the RAG-evaluation CLI parser, parse ``sys.argv`` and return the
    namespace with ``args.device`` resolved to cuda/cpu.

    Fix vs. the mangled original: every ``type=``, several ``default=`` and
    all ``required=`` values referenced an undefined name (a NameError at
    call time); restored the concrete values each option implies.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=str , help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ) , )
    parser.add_argument(
        """--index_name""" , default=None , choices=["""exact""", """compressed""", """legacy"""] , type=str , help="""RAG model retriever type""" , )
    parser.add_argument(
        """--index_path""" , default=None , type=str , help="""Path to the retrieval index""" , )
    parser.add_argument("""--n_docs""" , default=5 , type=int , help="""Number of retrieved docs""" )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=str , help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ) , )
    parser.add_argument("""--k""" , default=1 , type=int , help="""k for the precision@k calculation""" )
    parser.add_argument(
        """--evaluation_set""" , default=None , type=str , required=True , help="""Path to a file containing evaluation samples""" , )
    parser.add_argument(
        """--gold_data_path""" , default=None , type=str , required=True , help="""Path to a tab-separated file with gold samples""" , )
    parser.add_argument(
        """--gold_data_mode""" , default="""qa""" , type=str , choices=["""qa""", """ans"""] , help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ) , )
    parser.add_argument(
        """--predictions_path""" , type=str , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
    parser.add_argument(
        """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
    parser.add_argument(
        """--eval_batch_size""" , default=8 , type=int , help="""Batch size per GPU/CPU for evaluation.""" , )
    parser.add_argument(
        """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
    parser.add_argument(
        """--num_beams""" , default=4 , type=int , help="""Number of beams to be used when generating answers""" , )
    parser.add_argument("""--min_length""" , default=1 , type=int , help="""Min length of the generated answers""" )
    parser.add_argument("""--max_length""" , default=50 , type=int , help="""Max length of the generated answers""" )
    parser.add_argument(
        """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
    parser.add_argument(
        """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    return args
# End-to-end evaluation driver: pick model class from args.model_type, iterate
# checkpoints, stream the evaluation set in batches through the chosen
# evaluate_batch_* function, write predictions, then score them.
# NOTE(review): obfuscation collapsed many distinct locals below
# (model_kwargs, model_class, checkpoints, score_fn, evaluate_batch_fn,
# retriever, model, questions, answers) into repeated placeholder names, so
# several references (`model_class`, `checkpoints`, `questions`,
# `evaluate_batch_fn`, `score_fn`, `get_scores`, `infer_model_type`, ...) no
# longer match any assignment or definition in this file — restore from the
# upstream `eval_rag.py` before running.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
    lowerCamelCase__ : List[Any] = {}
    if args.model_type is None:
        lowerCamelCase__ : str = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        lowerCamelCase__ : Union[str, Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        lowerCamelCase__ : Tuple = args.n_docs
        if args.index_name is not None:
            lowerCamelCase__ : Dict = args.index_name
        if args.index_path is not None:
            lowerCamelCase__ : Optional[Any] = args.index_path
    else:
        lowerCamelCase__ : Tuple = BartForConditionalGeneration
    # Either every sub-directory of the checkpoint dir, or the single path.
    lowerCamelCase__ : Any = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("""Evaluate the following checkpoints: %s""" , UpperCamelCase )
    lowerCamelCase__ : Any = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    lowerCamelCase__ : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            # Reuse an existing predictions file unless --recalculate was given.
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path )
            continue
        logger.info("""***** Running evaluation for {} *****""".format(UpperCamelCase ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
        if args.model_type.startswith("""rag""" ):
            lowerCamelCase__ : Optional[int] = RagRetriever.from_pretrained(UpperCamelCase , **UpperCamelCase )
            lowerCamelCase__ : Optional[int] = model_class.from_pretrained(UpperCamelCase , retriever=UpperCamelCase , **UpperCamelCase )
            model.retriever.init_retrieval()
        else:
            lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
        model.to(args.device )
        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            lowerCamelCase__ : Optional[Any] = []
            for line in tqdm(UpperCamelCase ):
                questions.append(line.strip() )
                if len(UpperCamelCase ) == args.eval_batch_size:
                    # Full batch accumulated: run inference and flush predictions.
                    lowerCamelCase__ : Dict = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase )
                    preds_file.write("""\n""".join(UpperCamelCase ) + """\n""" )
                    preds_file.flush()
                    lowerCamelCase__ : Dict = []
            if len(UpperCamelCase ) > 0:
                # Trailing partial batch.
                lowerCamelCase__ : Dict = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase )
                preds_file.write("""\n""".join(UpperCamelCase ) )
                preds_file.flush()
            score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # Entry point: parse CLI args, then run the evaluation driver.
    # NOTE(review): `get_args` and `main` are not defined under those names in
    # this file (the defs were renamed by obfuscation) — verify the wiring
    # against the original script.
    _A : int =get_args()
    main(args)
| 631 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import wiring for the TrOCR subpackage: names are registered here and
# resolved on first attribute access via _LazyModule; torch-only modeling
# entries are added only when torch is importable.
# NOTE(review): the structure/list assignments below were renamed to `_A` by
# obfuscation, but `_import_structure` is still referenced at the bottom —
# that name no longer exists as written; restore the original variable names
# before use.
_A : Any ={
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling entries are simply not registered.
    pass
else:
    _A : str =[
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys
    # At runtime, replace this module with a lazy loader.
    _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE_ (key , default=False ):
    """Read boolean flag ``key`` from the environment: return ``default`` when
    unset, otherwise ``strtobool``'s 1/0; raise ValueError for other values.

    Fix vs. the mangled original: the signature declared the same parameter
    name twice (a SyntaxError) and ``strtobool`` was handed the wrong value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
# ---------------------------------------------------------------------------
# Test-skipping decorators: each wraps `test_case` in
# `unittest.skipUnless(<capability predicate>, reason)` so the test only runs
# when the capability (GPU/XPU/TPU, library, tracker, ...) is present.
# NOTE(review): obfuscation renamed every def to the same placeholder and the
# module-level results to `_A`, yet the bodies still read `_run_slow_tests`
# and `_atleast_one_tracker_available` (no longer defined as written), and one
# signature below repeats a parameter name (a SyntaxError as written) while
# reading `test_case`/`version`. Restore names from upstream accelerate
# `testing.py` before use.
# ---------------------------------------------------------------------------
_A : Dict =parse_flag_from_env('''RUN_SLOW''', default=False)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
    # Unconditional skip.
    return unittest.skip("""Test was skipped""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    return unittest.skipUnless(_run_slow_tests , """test is slow""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
    return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
    return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
    return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
    return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[int]:
    return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
    return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]:
    return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
    return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]:
    return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]:
    return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
    return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase=None , UpperCamelCase=None ) -> List[Any]:
    # Parameterizable version gate: with no test case, return a partial
    # binding the version; otherwise apply the skip directly.
    if test_case is None:
        return partial(UpperCamelCase , version=UpperCamelCase )
    return unittest.skipUnless(is_torch_version(""">=""" , UpperCamelCase ) , f'''test requires torch version >= {version}''' )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
    return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
    return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
    return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(UpperCamelCase )
# True when at least one of wandb/tensorboard is usable and comet_ml is not
# installed (comet_ml hijacks tracker selection).
_A : Union[str, Any] =(
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
    return unittest.skipUnless(
        _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(UpperCamelCase )
class _lowercase ( unittest.TestCase ):
a = True
@classmethod
def lowerCamelCase_ ( cls: Dict ):
lowerCamelCase__ : str = tempfile.mkdtemp()
@classmethod
def lowerCamelCase_ ( cls: Optional[int] ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCamelCase_ ( self: int ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class _lowercase ( unittest.TestCase ):
    """TestCase that resets accelerate's global singletons after every test so
    state cannot leak between tests.

    Fix vs. the mangled original: the hook was not named ``tearDown``, so
    unittest never invoked it.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class _lowercase ( unittest.TestCase ):
    # Starts the given mock patcher(s) and registers their `stop` via
    # addCleanup so patches are always undone when the test finishes.
    # NOTE(review): upstream this hook is named `setUp` (with a default for
    # the mocks argument) and the body reads the parameter, not a bare
    # `mocks` — the names here were mangled; restore before use.
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Union[mock.Mock, List[mock.Mock]] ):
        lowerCamelCase__ : List[str] = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
# Gather `tensor` across all distributed processes and check that every rank
# produced the same values (used to validate synchronization in tests).
# NOTE(review): obfuscation collapsed the locals — `tensor`, `state`,
# `tensors` and the reference slice no longer match the placeholder
# assignments below; restore from upstream accelerate `testing.py` before
# running.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
    lowerCamelCase__ : Tuple = AcceleratorState()
    lowerCamelCase__ : List[str] = tensor[None].clone().to(state.device )
    lowerCamelCase__ : Optional[Any] = gather(UpperCamelCase ).cpu()
    lowerCamelCase__ : Optional[Any] = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , UpperCamelCase ):
            return False
    return True
class _lowercase :
def __init__( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : Any = returncode
lowerCamelCase__ : Tuple = stdout
lowerCamelCase__ : Union[str, Any] = stderr
async def SCREAMING_SNAKE_CASE_ (stream , callback ):
    """Drain ``stream`` line by line, invoking ``callback`` on every non-empty
    line; stop at EOF (an empty read).

    Fix vs. the mangled original: the signature declared the same parameter
    name twice (a SyntaxError).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def SCREAMING_SNAKE_CASE_ (cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """Spawn ``cmd`` asynchronously, tee its stdout/stderr line by line into
    lists (echoing to this process's streams unless ``quiet``), and return a
    ``_RunOutput`` with the exit code and captured lines.

    Fix vs. the mangled original: the signature repeated one parameter name
    six times (a SyntaxError); restored the keyword parameters the body
    clearly uses. NOTE(review): ``_read_stream`` / ``_RunOutput`` are kept as
    written in the original body, but those defs were renamed elsewhere in
    this file by obfuscation — confirm the wiring.
    """
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        # Decode, record, and optionally echo one captured line.
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="""stderr:""" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def SCREAMING_SNAKE_CASE_(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion on the event loop and return its captured output.

    Args:
        cmd: argv list of the command to run.
        env, stdin, timeout, quiet, echo: forwarded to the async streaming runner.

    Returns:
        The _RunOutput of the finished command.

    Raises:
        RuntimeError: if the command fails; the message includes the combined stderr.
    """
    # Fix: the original declared every parameter with one placeholder name (SyntaxError)
    # and bound `loop`/`result`/`cmd_str`/`stderr` to placeholders while reading them.
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    # Use != 0 rather than > 0 so that negative return codes (process killed by a
    # signal on POSIX) are also reported as failures.
    if result.returncode != 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class _lowercase ( _lowercase ):
    # NOTE(review): this class inherits from its own name, which is a NameError at
    # class-creation time unless another `_lowercase` is defined earlier in the file.
    # `run_command` below raises `SubprocessCallException`, so this marker class was
    # presumably that exception (base most likely `Exception`) before obfuscation —
    # confirm against the original source before renaming/fixing.
    pass
def SCREAMING_SNAKE_CASE_(command, return_stdout=False):
    """Run *command* (an argv list) and optionally return its combined output.

    Args:
        command: argv list passed to ``subprocess.check_output``.
        return_stdout: when True, return the captured stdout+stderr, decoded to
            ``str`` when possible; otherwise return None.

    Raises:
        SubprocessCallException: if the command exits non-zero; the child's
            output is embedded in the message.
    """
    # Fix: the original declared both parameters with the same placeholder name
    # (SyntaxError) and read undefined `return_stdout`/`output`; also dropped the
    # bogus `-> Dict` annotation (the function returns str/bytes or None).
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)
_A : List[str] ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowercase ( _lowercase ):
    # NOTE(review): obfuscated configuration class; the model-type marker and the
    # defaults below match an Audio Spectrogram Transformer config — confirm upstream.
    # The base class inherits from its own name (NameError unless defined earlier).
    a = """audio-spectrogram-transformer"""

    def __init__( self: str , UpperCamelCase__: Any=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: List[Any]=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: Optional[Any]="gelu" , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: Union[str, Any]=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=10 , UpperCamelCase__: List[str]=10 , UpperCamelCase__: Any=1_024 , UpperCamelCase__: Optional[Any]=128 , **UpperCamelCase__: Union[str, Any] , ):
        # NOTE(review): every positional parameter above shares one placeholder name,
        # which is a SyntaxError, and each assignment below binds a placeholder while
        # reading a name (hidden_size, ...) that is never defined. The original intent
        # was clearly `self.hidden_size = hidden_size`, etc. — restore from upstream.
        super().__init__(**UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = hidden_size
        lowerCamelCase__ : int = num_hidden_layers
        lowerCamelCase__ : List[str] = num_attention_heads
        lowerCamelCase__ : Optional[int] = intermediate_size
        lowerCamelCase__ : List[Any] = hidden_act
        lowerCamelCase__ : List[Any] = hidden_dropout_prob
        lowerCamelCase__ : str = attention_probs_dropout_prob
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[str] = layer_norm_eps
        lowerCamelCase__ : List[Any] = patch_size
        lowerCamelCase__ : List[str] = qkv_bias
        lowerCamelCase__ : Dict = frequency_stride
        lowerCamelCase__ : List[Any] = time_stride
        lowerCamelCase__ : str = max_length
        lowerCamelCase__ : Dict = num_mel_bins
| 631 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the OPT model family: submodules are imported only on
# first attribute access, and each optional backend (torch / tf / flax) is added to
# the structure only when its dependency is installed.
# NOTE(review): obfuscation renamed `_import_structure` (and its per-backend
# extensions) to `_A`, so each assignment below shadows the previous one and the
# final `_LazyModule(...)` call reads an undefined `_import_structure`.
_A : str ={'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch backend absent: simply skip registering the PyTorch classes.
    pass
else:
    _A : int =[
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : int =[
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]
# Under static type checking we import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that defers submodule imports.
    _A : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Release-tooling constants.
# NOTE(review): obfuscation renamed every constant to `_A`, so each assignment
# shadows the previous one; the functions below read these tables via their
# original names (REPLACE_PATTERNS, REPLACE_FILES, README_FILE, and the examples
# root, originally PATH_TO_EXAMPLES) — restore those names before running.
_A : List[str] ='''examples/'''
# pattern name -> (compiled regex locating the version string, replacement template
# in which the literal "VERSION" is substituted with the new version).
_A : Any ={
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
# pattern name -> file in which that pattern must be updated.
_A : int ={
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
# README whose model list is rewritten at release time.
_A : int ='''README.md'''
def SCREAMING_SNAKE_CASE_(fname, version, pattern):
    """Rewrite the pinned version string inside *fname* in place.

    Args:
        fname: path of the file to edit.
        version: new version string substituted for the VERSION placeholder.
        pattern: key into REPLACE_PATTERNS selecting (regex, replacement template).
    """
    # Fix: the original declared three identically-named parameters (SyntaxError)
    # and bound `code`/`re_pattern`/`replace` to placeholders while reading them;
    # the bogus `-> str` annotation is dropped (the function returns None).
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    # NOTE(review): garbled — this walks its single argument as a directory tree AND
    # passes that same argument below as the version string; in the original these
    # were two different things (the examples folder constant and the new version).
    for folder, directories, fnames in os.walk(UpperCamelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                # NOTE(review): os.path.join(UpperCamelCase, UpperCamelCase) joins the
                # walk root with itself; original intent was os.path.join(folder, fname),
                # with the second argument being the version — confirm upstream.
                update_version_in_file(os.path.join(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , pattern="""examples""" )
def SCREAMING_SNAKE_CASE_(version, patch=False):
    """Update the version everywhere it is pinned (init, setup, and examples).

    Args:
        version: the new version string.
        patch: when True this is a patch release, so example scripts are skipped.
    """
    # Fix: the original declared both parameters with one placeholder name
    # (SyntaxError) and passed that placeholder three times to
    # update_version_in_file instead of (fname, version, pattern).
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def SCREAMING_SNAKE_CASE_():
    """Point the README model list at the stable docs instead of the dev docs.

    Rewrites every model entry's link by stripping the `/main/` segment, between
    the architecture-list header and the "contribute a new model" footer.
    """
    # Fix: the original bound its locals (`lines`, `start_index`, `index`) to a
    # placeholder while reading them, and opened/wrote an undefined name where the
    # README path belongs. README_FILE is the module-level README constant
    # (currently garbled to `_A` at the top of this file — restore that name too).
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def SCREAMING_SNAKE_CASE_():
    """Read the current version out of the package __init__ and parse it.

    Returns:
        A ``packaging.version.Version`` for the version string captured by the
        "init" pattern in REPLACE_PATTERNS.
    """
    # Fix: the original bound the file contents and the match result to a
    # placeholder, then called `.search(...)` on an undefined name (the function
    # takes no parameters); the bogus `-> Optional[Any]` annotation is dropped.
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def SCREAMING_SNAKE_CASE_(patch=False):
    """Do the version bumping required just before cutting a release.

    Computes the next version (dropping `.dev0`, or bumping micro for a patch),
    confirms it interactively, then updates it everywhere and cleans the README.

    Args:
        patch: when True, produce a patch release (micro bump, examples untouched).
    """
    # Fix: the original's parameter was a placeholder while the body read `patch`,
    # and `default_version`/`version` were bound to placeholders while being read.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def SCREAMING_SNAKE_CASE_():
    """Bump to the next `.dev0` version right after a release.

    Proposes `<major>.<minor+1>.0.dev0`, confirms it interactively, then applies
    it everywhere and cleans the README model list.
    """
    # Fix: the original bound `current_version`/`dev_version`/`version` to
    # placeholders while reading those names (NameError at runtime).
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
# CLI entry point: `--post_release` bumps to the next dev version, otherwise this
# is pre-release work; `--patch` marks a patch release (nothing to do afterwards).
# NOTE(review): garbled — the parser and parsed args are bound to `_A`, yet the
# code reads `parser` and `args`; originally `parser = argparse.ArgumentParser()`
# and `args = parser.parse_args()`.
if __name__ == "__main__":
    _A : List[Any] =argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    _A : List[str] =parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _lowercase ( _lowercase , _lowercase ):
    # NOTE(review): a variance-preserving (VP) SDE scheduler in the diffusers style;
    # obfuscation garbled the class/base names, `__init__` parameters, and every
    # assignment target (placeholders instead of `self.<attr>` / local names).
    a = 1

    @register_to_config
    def __init__( self: List[Any] , UpperCamelCase__: str=2_000 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Dict=20 , UpperCamelCase__: str=1e-3 ):
        # NOTE(review): duplicate parameter names above are a SyntaxError; the three
        # bindings below were presumably self.sigmas / self.discrete_sigmas /
        # self.timesteps = None (step_pred below reads self.timesteps) — confirm.
        lowerCamelCase__ : Dict = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[int] = None

    def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: Union[str, torch.device] = None ):
        # Builds the integration grid from 1 down to sampling_eps.
        # NOTE(review): result is bound to a placeholder; original was
        # `self.timesteps = torch.linspace(...)`.
        lowerCamelCase__ : Any = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase__ , device=UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Dict , UpperCamelCase__: int=None ):
        # One reverse-SDE (Euler–Maruyama) step. NOTE(review): every local below is
        # bound to a placeholder but read under its original name (score, std,
        # beta_t, drift, diffusion, dt, x, x_mean, noise) — NameErrors as written.
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        lowerCamelCase__ : List[Any] = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        lowerCamelCase__ : Union[str, Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        lowerCamelCase__ : List[str] = std.flatten()
        # Broadcast std to the score's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            lowerCamelCase__ : Optional[int] = std.unsqueeze(-1 )
        lowerCamelCase__ : List[str] = -score / std
        # compute
        lowerCamelCase__ : Any = -1.0 / len(self.timesteps )
        lowerCamelCase__ : List[Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        lowerCamelCase__ : List[str] = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            lowerCamelCase__ : str = beta_t.unsqueeze(-1 )
        lowerCamelCase__ : str = -0.5 * beta_t * x
        lowerCamelCase__ : Dict = torch.sqrt(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = drift - diffusion**2 * score
        lowerCamelCase__ : Tuple = x + drift * dt
        # add noise
        lowerCamelCase__ : Union[str, Any] = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase__ , device=x.device , dtype=x.dtype )
        lowerCamelCase__ : Union[str, Any] = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self: Optional[int] ):
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 631 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    # NOTE(review): trains a tiny UNet with DDPM- and DDIM-added noise and was meant
    # to compare the two runs; obfuscation bound most locals to placeholders while
    # reading the original names (model, optimizer, loss, ...) — NameErrors as written.

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        # Build a small deterministic (seeded) model + SGD optimizer pair.
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Optional[Any] = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        # NOTE(review): `get_model_optimizer` / keyword `resolution` do not match the
        # garbled method definition above — original names, confirm upstream.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # NOTE(review): both asserts compare a value with itself (always True);
        # originally these compared the DDPM run's outputs against the DDIM run's.
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 631 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( _lowercase , unittest.TestCase ):
    # NOTE(review): ONNX Stable Diffusion x4 upscaler pipeline smoke tests on CPU,
    # one per scheduler. Obfuscation renamed every test method to the same name (each
    # redefines the previous) and bound locals (pipe, image, inputs, image_slice,
    # expected_slice) to placeholders while reading the original names.
    # TODO: is there an appropriate internal test set?
    a = """ssube/stable-diffusion-x4-upscaler-onnx"""

    def lowerCamelCase_ ( self: int , UpperCamelCase__: Optional[Any]=0 ):
        # Build a deterministic (seeded) set of pipeline inputs with a 128x128 image.
        lowerCamelCase__ : Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase__ ) )
        lowerCamelCase__ : Dict = torch.manual_seed(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase_ ( self: Tuple ):
        # Default scheduler: check output size and a reference pixel slice.
        lowerCamelCase__ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs()
        lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : Any = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # PNDM scheduler variant.
        lowerCamelCase__ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        lowerCamelCase__ : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : int = self.get_dummy_inputs()
        lowerCamelCase__ : int = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : Union[str, Any] = np.array(
            [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowerCamelCase_ ( self: Dict ):
        # DPM-Solver multistep scheduler variant.
        lowerCamelCase__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        lowerCamelCase__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs()
        lowerCamelCase__ : Dict = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : List[Any] = np.array(
            [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowerCamelCase_ ( self: List[str] ):
        # Euler discrete scheduler variant.
        lowerCamelCase__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        lowerCamelCase__ : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = self.get_dummy_inputs()
        lowerCamelCase__ : int = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : Dict = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Euler ancestral discrete scheduler variant.
        lowerCamelCase__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        lowerCamelCase__ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = self.get_dummy_inputs()
        lowerCamelCase__ : Dict = pipe(**UpperCamelCase__ ).images
        lowerCamelCase__ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase__ : int = np.array(
            [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    # NOTE(review): nightly GPU integration tests for the ONNX upscaler pipeline.
    # Obfuscation renamed all methods/properties to the same name (each redefines the
    # previous) and bound locals to placeholders while reading the original names.

    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # CUDA execution provider with a capped arena so the test fits in GPU memory.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowerCamelCase_ ( self: Any ):
        # NOTE(review): builds ort.SessionOptions then returns `options`, but the
        # binding targets are placeholders — originally options = ort.SessionOptions();
        # options.enable_mem_pattern = False.
        lowerCamelCase__ : int = ort.SessionOptions()
        lowerCamelCase__ : List[Any] = False
        return options

    def lowerCamelCase_ ( self: Any ):
        # End-to-end upscale with the default (PNDM) scheduler against a known slice.
        lowerCamelCase__ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        lowerCamelCase__ : Union[str, Any] = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        lowerCamelCase__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = """A fantasy landscape, trending on artstation"""
        lowerCamelCase__ : int = torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] = pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" , )
        lowerCamelCase__ : int = output.images
        lowerCamelCase__ : Union[str, Any] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        lowerCamelCase__ : List[Any] = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Same flow but with an explicit LMS discrete scheduler.
        lowerCamelCase__ : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        lowerCamelCase__ : Union[str, Any] = init_image.resize((128, 128) )
        lowerCamelCase__ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
        lowerCamelCase__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = """A fantasy landscape, trending on artstation"""
        lowerCamelCase__ : List[str] = torch.manual_seed(0 )
        lowerCamelCase__ : int = pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase__ , output_type="""np""" , )
        lowerCamelCase__ : Optional[Any] = output.images
        lowerCamelCase__ : Any = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        lowerCamelCase__ : int = np.array(
            [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 631 |
'''simple docstring'''
from statistics import mean
import numpy as np
def SCREAMING_SNAKE_CASE_(process_name, arrival_time, burst_time, no_of_process) -> list:
    """Calculate turnaround times under Highest Response Ratio Next scheduling.

    At each step, among the arrived & unfinished processes, the one with the
    highest response ratio ((burst + waiting) / burst) runs to completion.

    Args:
        process_name: names of the processes (reordered internally by arrival).
        arrival_time: arrival time of each process; NOTE: sorted in place.
        burst_time: CPU burst length of each process.
        no_of_process: number of processes.

    Returns:
        Turnaround time of each process, indexed in arrival order.
    """
    # Fix: the original declared four identically-named parameters (SyntaxError)
    # and bound every state variable to a placeholder while reading the real names.
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first still-pending process.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU would be idle, jump ahead to that process's arrival time.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def SCREAMING_SNAKE_CASE_(process_name, turn_around_time, burst_time, no_of_process) -> list:
    """Compute per-process waiting time: waiting = turnaround - burst.

    Args:
        process_name: process names (unused; kept for signature parity).
        turn_around_time: turnaround time of each process.
        burst_time: CPU burst length of each process.
        no_of_process: number of processes.

    Returns:
        Waiting time of each process, same ordering as the inputs.
    """
    # Fix: the original declared four identically-named parameters (SyntaxError)
    # and bound `waiting_time` to a placeholder while indexing/returning it.
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
# Demo: run HRRN scheduling on five processes and print the per-process table
# plus the average waiting/turnaround times.
# NOTE(review): garbled — the five bindings below all target `_A` while the loop
# and prints read no_of_process/process_name/arrival_time/burst_time/
# turn_around_time/waiting_time, and the calls reference
# calculate_turn_around_time / calculate_waiting_time, whose definitions above
# were renamed by the obfuscation. Restore the original names before running.
if __name__ == "__main__":
    _A : List[str] =5
    _A : Optional[Any] =['''A''', '''B''', '''C''', '''D''', '''E''']
    _A : Optional[int] =[1, 2, 3, 4, 5]
    _A : Dict =[1, 2, 3, 4, 5]
    _A : Any =calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    _A : Optional[int] =calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.